drivers/block/cciss.c
1 /*
2 * Disk Array driver for HP SA 5xxx and 6xxx Controllers
3 * Copyright 2000, 2006 Hewlett-Packard Development Company, L.P.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
20 *
21 */
22
23 #include <linux/config.h> /* CONFIG_PROC_FS */
24 #include <linux/module.h>
25 #include <linux/interrupt.h>
26 #include <linux/types.h>
27 #include <linux/pci.h>
28 #include <linux/kernel.h>
29 #include <linux/slab.h>
30 #include <linux/delay.h>
31 #include <linux/major.h>
32 #include <linux/fs.h>
33 #include <linux/bio.h>
34 #include <linux/blkpg.h>
35 #include <linux/timer.h>
36 #include <linux/proc_fs.h>
37 #include <linux/init.h>
38 #include <linux/hdreg.h>
39 #include <linux/spinlock.h>
40 #include <linux/compat.h>
41 #include <linux/blktrace_api.h>
42 #include <asm/uaccess.h>
43 #include <asm/io.h>
44
45 #include <linux/dma-mapping.h>
46 #include <linux/blkdev.h>
47 #include <linux/genhd.h>
48 #include <linux/completion.h>
49
50 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
51 #define DRIVER_NAME "HP CISS Driver (v 3.6.10)"
52 #define DRIVER_VERSION CCISS_DRIVER_VERSION(3,6,10)
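/* e.g. CCISS_DRIVER_VERSION(3,6,10) packs to 0x03060A: major in bits 23-16,
 * minor in bits 15-8, subminor in bits 7-0. */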
53
54 /* Embedded module documentation macros - see modules.h */
55 MODULE_AUTHOR("Hewlett-Packard Company");
56 MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 3.6.10");
57 MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
58 " SA6i P600 P800 P400 P400i E200 E200i E500");
59 MODULE_LICENSE("GPL");
60
61 #include "cciss_cmd.h"
62 #include "cciss.h"
63 #include <linux/cciss_ioctl.h>
64
65 /* define the PCI info for the cards we can control */
66 static const struct pci_device_id cciss_pci_device_id[] = {
67 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070},
68 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
69 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
70 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083},
71 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091},
72 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A},
73 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B},
74 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C},
75 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D},
76 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225},
77 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
78 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
79 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235},
80 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211},
81 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212},
82 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213},
83 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214},
84 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
85 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3233},
86 {0,}
87 };
88
89 MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
90
91 /* board_id = (Subsystem Device ID << 16) | Subsystem Vendor ID
92 * product = Marketing Name for the board
93 * access = Address of the struct of function pointers
94 */
95 static struct board_type products[] = {
96 {0x40700E11, "Smart Array 5300", &SA5_access},
97 {0x40800E11, "Smart Array 5i", &SA5B_access},
98 {0x40820E11, "Smart Array 532", &SA5B_access},
99 {0x40830E11, "Smart Array 5312", &SA5B_access},
100 {0x409A0E11, "Smart Array 641", &SA5_access},
101 {0x409B0E11, "Smart Array 642", &SA5_access},
102 {0x409C0E11, "Smart Array 6400", &SA5_access},
103 {0x409D0E11, "Smart Array 6400 EM", &SA5_access},
104 {0x40910E11, "Smart Array 6i", &SA5_access},
105 {0x3225103C, "Smart Array P600", &SA5_access},
106 {0x3223103C, "Smart Array P800", &SA5_access},
107 {0x3234103C, "Smart Array P400", &SA5_access},
108 {0x3235103C, "Smart Array P400i", &SA5_access},
109 {0x3211103C, "Smart Array E200i", &SA5_access},
110 {0x3212103C, "Smart Array E200", &SA5_access},
111 {0x3213103C, "Smart Array E200i", &SA5_access},
112 {0x3214103C, "Smart Array E200i", &SA5_access},
113 {0x3215103C, "Smart Array E200i", &SA5_access},
114 {0x3233103C, "Smart Array E500", &SA5_access},
115 };
116
117 /* How long to wait (in milliseconds) for board to go into simple mode */
118 #define MAX_CONFIG_WAIT 30000
119 #define MAX_IOCTL_CONFIG_WAIT 1000
120
121 /* define how many times we will try a command because of bus resets */
122 #define MAX_CMD_RETRIES 3
123
124 #define READ_AHEAD 1024
125 #define NR_CMDS 384 /* #commands that can be outstanding */
126 #define MAX_CTLR 32
127
128 /* Originally the cciss driver only supported 8 major numbers */
129 #define MAX_CTLR_ORIG 8
130
131 static ctlr_info_t *hba[MAX_CTLR];
132
133 static void do_cciss_request(request_queue_t *q);
134 static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs);
135 static int cciss_open(struct inode *inode, struct file *filep);
136 static int cciss_release(struct inode *inode, struct file *filep);
137 static int cciss_ioctl(struct inode *inode, struct file *filep,
138 unsigned int cmd, unsigned long arg);
139 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
140
141 static int revalidate_allvol(ctlr_info_t *host);
142 static int cciss_revalidate(struct gendisk *disk);
143 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
144 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
145 int clear_all);
146
147 static void cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf,
148 int withirq, unsigned int *total_size,
149 unsigned int *block_size);
150 static void cciss_geometry_inquiry(int ctlr, int logvol, int withirq,
151 unsigned int total_size,
152 unsigned int block_size,
153 InquiryData_struct *inq_buff,
154 drive_info_struct *drv);
155 static void cciss_getgeometry(int cntl_num);
156 static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
157 __u32);
158 static void start_io(ctlr_info_t *h);
159 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
160 unsigned int use_unit_num, unsigned int log_unit,
161 __u8 page_code, unsigned char *scsi3addr, int cmd_type);
162 static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
163 unsigned int use_unit_num, unsigned int log_unit,
164 __u8 page_code, int cmd_type);
165
166 static void fail_all_cmds(unsigned long ctlr);
167
168 #ifdef CONFIG_PROC_FS
169 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
170 int length, int *eof, void *data);
171 static void cciss_procinit(int i);
172 #else
173 static void cciss_procinit(int i)
174 {
175 }
176 #endif /* CONFIG_PROC_FS */
177
178 #ifdef CONFIG_COMPAT
179 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
180 #endif
181
182 static struct block_device_operations cciss_fops = {
183 .owner = THIS_MODULE,
184 .open = cciss_open,
185 .release = cciss_release,
186 .ioctl = cciss_ioctl,
187 .getgeo = cciss_getgeo,
188 #ifdef CONFIG_COMPAT
189 .compat_ioctl = cciss_compat_ioctl,
190 #endif
191 .revalidate_disk = cciss_revalidate,
192 };
193
194 /*
195 * Enqueuing and dequeuing functions for cmdlists.
196 */
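/* The per-controller command queues (e.g. h->reqQ) are circular doubly-linked
 * lists: *Qptr points at the head and head->prev is the tail. addQ() links a
 * command in just before the head (i.e. at the tail); removeQ() unlinks it and
 * sets *Qptr to NULL when the queue becomes empty. */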
197 static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
198 {
199 if (*Qptr == NULL) {
200 *Qptr = c;
201 c->next = c->prev = c;
202 } else {
203 c->prev = (*Qptr)->prev;
204 c->next = (*Qptr);
205 (*Qptr)->prev->next = c;
206 (*Qptr)->prev = c;
207 }
208 }
209
210 static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
211 CommandList_struct *c)
212 {
213 if (c && c->next != c) {
214 if (*Qptr == c)
215 *Qptr = c->next;
216 c->prev->next = c->next;
217 c->next->prev = c->prev;
218 } else {
219 *Qptr = NULL;
220 }
221 return c;
222 }
223
224 #include "cciss_scsi.c" /* For SCSI tape support */
225
226 #ifdef CONFIG_PROC_FS
227
228 /*
229 * Report information about this controller.
230 */
231 #define ENG_GIG 1000000000
232 #define ENG_GIG_FACTOR (ENG_GIG/512)
233 #define RAID_UNKNOWN 6
234 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
235 "UNKNOWN"
236 };
237
238 static struct proc_dir_entry *proc_cciss;
239
240 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
241 int length, int *eof, void *data)
242 {
243 off_t pos = 0;
244 off_t len = 0;
245 int size, i, ctlr;
246 ctlr_info_t *h = (ctlr_info_t *) data;
247 drive_info_struct *drv;
248 unsigned long flags;
249 sector_t vol_sz, vol_sz_frac;
250
251 ctlr = h->ctlr;
252
253 /* prevent displaying bogus info during configuration
254 * or deconfiguration of a logical volume
255 */
256 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
257 if (h->busy_configuring) {
258 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
259 return -EBUSY;
260 }
261 h->busy_configuring = 1;
262 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
263
264 size = sprintf(buffer, "%s: HP %s Controller\n"
265 "Board ID: 0x%08lx\n"
266 "Firmware Version: %c%c%c%c\n"
267 "IRQ: %d\n"
268 "Logical drives: %d\n"
269 "Current Q depth: %d\n"
270 "Current # commands on controller: %d\n"
271 "Max Q depth since init: %d\n"
272 "Max # commands on controller since init: %d\n"
273 "Max SG entries since init: %d\n\n",
274 h->devname,
275 h->product_name,
276 (unsigned long)h->board_id,
277 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
278 h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
279 h->num_luns, h->Qdepth, h->commands_outstanding,
280 h->maxQsinceinit, h->max_outstanding, h->maxSG);
281
282 pos += size;
283 len += size;
284 cciss_proc_tape_report(ctlr, buffer, &pos, &len);
285 for (i = 0; i <= h->highest_lun; i++) {
286
287 drv = &h->drv[i];
288 if (drv->heads == 0)
289 continue;
290
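/* Treat nr_blocks as 512-byte sectors: dividing by ENG_GIG_FACTOR (10^9/512)
 * gives the size in decimal gigabytes, and scaling the remainder by 100 before
 * dividing again yields the two fractional digits printed below. */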
291 vol_sz = drv->nr_blocks;
292 vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
293 vol_sz_frac *= 100;
294 sector_div(vol_sz_frac, ENG_GIG_FACTOR);
295
296 if (drv->raid_level > 5)
297 drv->raid_level = RAID_UNKNOWN;
298 size = sprintf(buffer + len, "cciss/c%dd%d:"
299 "\t%4u.%02uGB\tRAID %s\n",
300 ctlr, i, (int)vol_sz, (int)vol_sz_frac,
301 raid_label[drv->raid_level]);
302 pos += size;
303 len += size;
304 }
305
306 *eof = 1;
307 *start = buffer + offset;
308 len -= offset;
309 if (len > length)
310 len = length;
311 h->busy_configuring = 0;
312 return len;
313 }
314
315 static int
316 cciss_proc_write(struct file *file, const char __user *buffer,
317 unsigned long count, void *data)
318 {
319 unsigned char cmd[80];
320 int len;
321 #ifdef CONFIG_CISS_SCSI_TAPE
322 ctlr_info_t *h = (ctlr_info_t *) data;
323 int rc;
324 #endif
325
326 if (count > sizeof(cmd) - 1)
327 return -EINVAL;
328 if (copy_from_user(cmd, buffer, count))
329 return -EFAULT;
330 cmd[count] = '\0';
331 len = strlen(cmd); // above 3 lines ensure safety
332 if (len && cmd[len - 1] == '\n')
333 cmd[--len] = '\0';
334 # ifdef CONFIG_CISS_SCSI_TAPE
335 if (strcmp("engage scsi", cmd) == 0) {
336 rc = cciss_engage_scsi(h->ctlr);
337 if (rc != 0)
338 return -rc;
339 return count;
340 }
341 /* might be nice to have "disengage" too, but it's not
342 safely possible. (only 1 module use count, lock issues.) */
343 # endif
344 return -EINVAL;
345 }
346
347 /*
348 * Get us a file in /proc/cciss that says something about each controller.
349 * Create /proc/cciss if it doesn't exist yet.
350 */
351 static void __devinit cciss_procinit(int i)
352 {
353 struct proc_dir_entry *pde;
354
355 if (proc_cciss == NULL) {
356 proc_cciss = proc_mkdir("cciss", proc_root_driver);
357 if (!proc_cciss)
358 return;
359 }
360
361 pde = create_proc_read_entry(hba[i]->devname,
362 S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
363 proc_cciss, cciss_proc_get_info, hba[i]);
364 if (pde)
	pde->write_proc = cciss_proc_write;
365 }
366 #endif /* CONFIG_PROC_FS */
367
368 /*
369 * For operations that cannot sleep, a command block is allocated at init,
370 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
371 * which ones are free or in use. For operations that can wait for kmalloc
372 * to possibly sleep, this routine can be called with get_from_pool set to 0.
373 * cmd_free() MUST be called with got_from_pool set to 0 if cmd_alloc() was.
374 */
375 static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
376 {
377 CommandList_struct *c;
378 int i;
379 u64bit temp64;
380 dma_addr_t cmd_dma_handle, err_dma_handle;
381
382 if (!get_from_pool) {
383 c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
384 sizeof(CommandList_struct), &cmd_dma_handle);
385 if (c == NULL)
386 return NULL;
387 memset(c, 0, sizeof(CommandList_struct));
388
389 c->cmdindex = -1;
390
391 c->err_info = (ErrorInfo_struct *)
392 pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
393 &err_dma_handle);
394
395 if (c->err_info == NULL) {
396 pci_free_consistent(h->pdev,
397 sizeof(CommandList_struct), c, cmd_dma_handle);
398 return NULL;
399 }
400 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
401 } else { /* get it out of the controller's pool */
402
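/* Claim a free slot from the pool bitmap without holding a lock:
 * find_first_zero_bit() suggests a candidate and test_and_set_bit()
 * claims it atomically; if another caller got there first, loop and
 * look again. */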
403 do {
404 i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
405 if (i == NR_CMDS)
406 return NULL;
407 } while (test_and_set_bit
408 (i & (BITS_PER_LONG - 1),
409 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
410 #ifdef CCISS_DEBUG
411 printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
412 #endif
413 c = h->cmd_pool + i;
414 memset(c, 0, sizeof(CommandList_struct));
415 cmd_dma_handle = h->cmd_pool_dhandle
416 + i * sizeof(CommandList_struct);
417 c->err_info = h->errinfo_pool + i;
418 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
419 err_dma_handle = h->errinfo_pool_dhandle
420 + i * sizeof(ErrorInfo_struct);
421 h->nr_allocs++;
422
423 c->cmdindex = i;
424 }
425
426 c->busaddr = (__u32) cmd_dma_handle;
427 temp64.val = (__u64) err_dma_handle;
428 c->ErrDesc.Addr.lower = temp64.val32.lower;
429 c->ErrDesc.Addr.upper = temp64.val32.upper;
430 c->ErrDesc.Len = sizeof(ErrorInfo_struct);
431
432 c->ctlr = h->ctlr;
433 return c;
434 }
435
436 /*
437 * Frees a command block that was previously allocated with cmd_alloc().
438 */
439 static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
440 {
441 int i;
442 u64bit temp64;
443
444 if (!got_from_pool) {
445 temp64.val32.lower = c->ErrDesc.Addr.lower;
446 temp64.val32.upper = c->ErrDesc.Addr.upper;
447 pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
448 c->err_info, (dma_addr_t) temp64.val);
449 pci_free_consistent(h->pdev, sizeof(CommandList_struct),
450 c, (dma_addr_t) c->busaddr);
451 } else {
452 i = c - h->cmd_pool;
453 clear_bit(i & (BITS_PER_LONG - 1),
454 h->cmd_pool_bits + (i / BITS_PER_LONG));
455 h->nr_frees++;
456 }
457 }
458
459 static inline ctlr_info_t *get_host(struct gendisk *disk)
460 {
461 return disk->queue->queuedata;
462 }
463
464 static inline drive_info_struct *get_drv(struct gendisk *disk)
465 {
466 return disk->private_data;
467 }
468
469 /*
470 * Open. Make sure the device is really there.
471 */
472 static int cciss_open(struct inode *inode, struct file *filep)
473 {
474 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
475 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
476
477 #ifdef CCISS_DEBUG
478 printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
479 #endif /* CCISS_DEBUG */
480
481 if (host->busy_initializing || drv->busy_configuring)
482 return -EBUSY;
483 /*
484 * Root is allowed to open raw volume zero even if it's not configured
485 * so array config can still work. Root is also allowed to open any
486 * volume that has a LUN ID, so it can issue IOCTL to reread the
487 * disk information. I don't think I really like this,
488 * but I'm already using way too many device nodes to claim another one
489 * for "raw controller".
490 */
491 if (drv->nr_blocks == 0) {
492 if (iminor(inode) != 0) { /* not node 0? */
493 /* if not node 0, make sure it is partition 0 */
494 if (iminor(inode) & 0x0f) {
495 return -ENXIO;
496 /* if it is, make sure we have a LUN ID */
497 } else if (drv->LunID == 0) {
498 return -ENXIO;
499 }
500 }
501 if (!capable(CAP_SYS_ADMIN))
502 return -EPERM;
503 }
504 drv->usage_count++;
505 host->usage_count++;
506 return 0;
507 }
508
509 /*
510 * Close. Sync first.
511 */
512 static int cciss_release(struct inode *inode, struct file *filep)
513 {
514 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
515 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
516
517 #ifdef CCISS_DEBUG
518 printk(KERN_DEBUG "cciss_release %s\n",
519 inode->i_bdev->bd_disk->disk_name);
520 #endif /* CCISS_DEBUG */
521
522 drv->usage_count--;
523 host->usage_count--;
524 return 0;
525 }
526
527 #ifdef CONFIG_COMPAT
528
529 static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
530 {
531 int ret;
532 lock_kernel();
533 ret = cciss_ioctl(f->f_dentry->d_inode, f, cmd, arg);
534 unlock_kernel();
535 return ret;
536 }
537
538 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
539 unsigned long arg);
540 static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd,
541 unsigned long arg);
542
543 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
544 {
545 switch (cmd) {
546 case CCISS_GETPCIINFO:
547 case CCISS_GETINTINFO:
548 case CCISS_SETINTINFO:
549 case CCISS_GETNODENAME:
550 case CCISS_SETNODENAME:
551 case CCISS_GETHEARTBEAT:
552 case CCISS_GETBUSTYPES:
553 case CCISS_GETFIRMVER:
554 case CCISS_GETDRIVVER:
555 case CCISS_REVALIDVOLS:
556 case CCISS_DEREGDISK:
557 case CCISS_REGNEWDISK:
558 case CCISS_REGNEWD:
559 case CCISS_RESCANDISK:
560 case CCISS_GETLUNINFO:
561 return do_ioctl(f, cmd, arg);
562
563 case CCISS_PASSTHRU32:
564 return cciss_ioctl32_passthru(f, cmd, arg);
565 case CCISS_BIG_PASSTHRU32:
566 return cciss_ioctl32_big_passthru(f, cmd, arg);
567
568 default:
569 return -ENOIOCTLCMD;
570 }
571 }
572
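/* The 32-bit passthru ioctls carry a 32-bit user buffer pointer, so they
 * cannot be handed to cciss_ioctl() as-is. Each thunk below copies the compat
 * structure into a native-sized one allocated with compat_alloc_user_space(),
 * widens the buffer pointer via compat_ptr(), issues the native ioctl, and
 * then copies the error information back to the 32-bit caller. */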
573 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
574 unsigned long arg)
575 {
576 IOCTL32_Command_struct __user *arg32 =
577 (IOCTL32_Command_struct __user *) arg;
578 IOCTL_Command_struct arg64;
579 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
580 int err;
581 u32 cp;
582
583 err = 0;
584 err |=
585 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
586 sizeof(arg64.LUN_info));
587 err |=
588 copy_from_user(&arg64.Request, &arg32->Request,
589 sizeof(arg64.Request));
590 err |=
591 copy_from_user(&arg64.error_info, &arg32->error_info,
592 sizeof(arg64.error_info));
593 err |= get_user(arg64.buf_size, &arg32->buf_size);
594 err |= get_user(cp, &arg32->buf);
595 arg64.buf = compat_ptr(cp);
596 err |= copy_to_user(p, &arg64, sizeof(arg64));
597
598 if (err)
599 return -EFAULT;
600
601 err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long)p);
602 if (err)
603 return err;
604 err |=
605 copy_in_user(&arg32->error_info, &p->error_info,
606 sizeof(arg32->error_info));
607 if (err)
608 return -EFAULT;
609 return err;
610 }
611
612 static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd,
613 unsigned long arg)
614 {
615 BIG_IOCTL32_Command_struct __user *arg32 =
616 (BIG_IOCTL32_Command_struct __user *) arg;
617 BIG_IOCTL_Command_struct arg64;
618 BIG_IOCTL_Command_struct __user *p =
619 compat_alloc_user_space(sizeof(arg64));
620 int err;
621 u32 cp;
622
623 err = 0;
624 err |=
625 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
626 sizeof(arg64.LUN_info));
627 err |=
628 copy_from_user(&arg64.Request, &arg32->Request,
629 sizeof(arg64.Request));
630 err |=
631 copy_from_user(&arg64.error_info, &arg32->error_info,
632 sizeof(arg64.error_info));
633 err |= get_user(arg64.buf_size, &arg32->buf_size);
634 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
635 err |= get_user(cp, &arg32->buf);
636 arg64.buf = compat_ptr(cp);
637 err |= copy_to_user(p, &arg64, sizeof(arg64));
638
639 if (err)
640 return -EFAULT;
641
642 err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long)p);
643 if (err)
644 return err;
645 err |=
646 copy_in_user(&arg32->error_info, &p->error_info,
647 sizeof(arg32->error_info));
648 if (err)
649 return -EFAULT;
650 return err;
651 }
652 #endif
653
654 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
655 {
656 drive_info_struct *drv = get_drv(bdev->bd_disk);
657
658 if (!drv->cylinders)
659 return -ENXIO;
660
661 geo->heads = drv->heads;
662 geo->sectors = drv->sectors;
663 geo->cylinders = drv->cylinders;
664 return 0;
665 }
666
667 /*
668 * ioctl
669 */
670 static int cciss_ioctl(struct inode *inode, struct file *filep,
671 unsigned int cmd, unsigned long arg)
672 {
673 struct block_device *bdev = inode->i_bdev;
674 struct gendisk *disk = bdev->bd_disk;
675 ctlr_info_t *host = get_host(disk);
676 drive_info_struct *drv = get_drv(disk);
677 int ctlr = host->ctlr;
678 void __user *argp = (void __user *)arg;
679
680 #ifdef CCISS_DEBUG
681 printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
682 #endif /* CCISS_DEBUG */
683
684 switch (cmd) {
685 case CCISS_GETPCIINFO:
686 {
687 cciss_pci_info_struct pciinfo;
688
689 if (!arg)
690 return -EINVAL;
691 pciinfo.domain = pci_domain_nr(host->pdev->bus);
692 pciinfo.bus = host->pdev->bus->number;
693 pciinfo.dev_fn = host->pdev->devfn;
694 pciinfo.board_id = host->board_id;
695 if (copy_to_user
696 (argp, &pciinfo, sizeof(cciss_pci_info_struct)))
697 return -EFAULT;
698 return 0;
699 }
700 case CCISS_GETINTINFO:
701 {
702 cciss_coalint_struct intinfo;
703 if (!arg)
704 return -EINVAL;
705 intinfo.delay =
706 readl(&host->cfgtable->HostWrite.CoalIntDelay);
707 intinfo.count =
708 readl(&host->cfgtable->HostWrite.CoalIntCount);
709 if (copy_to_user
710 (argp, &intinfo, sizeof(cciss_coalint_struct)))
711 return -EFAULT;
712 return 0;
713 }
714 case CCISS_SETINTINFO:
715 {
716 cciss_coalint_struct intinfo;
717 unsigned long flags;
718 int i;
719
720 if (!arg)
721 return -EINVAL;
722 if (!capable(CAP_SYS_ADMIN))
723 return -EPERM;
724 if (copy_from_user
725 (&intinfo, argp, sizeof(cciss_coalint_struct)))
726 return -EFAULT;
727 if ((intinfo.delay == 0) && (intinfo.count == 0))
728 {
729 // printk("cciss_ioctl: delay and count cannot be 0\n");
730 return -EINVAL;
731 }
732 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
733 /* Update the field, and then ring the doorbell */
734 writel(intinfo.delay,
735 &(host->cfgtable->HostWrite.CoalIntDelay));
736 writel(intinfo.count,
737 &(host->cfgtable->HostWrite.CoalIntCount));
738 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
739
740 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
741 if (!(readl(host->vaddr + SA5_DOORBELL)
742 & CFGTBL_ChangeReq))
743 break;
744 /* delay and try again */
745 udelay(1000);
746 }
747 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
748 if (i >= MAX_IOCTL_CONFIG_WAIT)
749 return -EAGAIN;
750 return 0;
751 }
752 case CCISS_GETNODENAME:
753 {
754 NodeName_type NodeName;
755 int i;
756
757 if (!arg)
758 return -EINVAL;
759 for (i = 0; i < 16; i++)
760 NodeName[i] =
761 readb(&host->cfgtable->ServerName[i]);
762 if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
763 return -EFAULT;
764 return 0;
765 }
766 case CCISS_SETNODENAME:
767 {
768 NodeName_type NodeName;
769 unsigned long flags;
770 int i;
771
772 if (!arg)
773 return -EINVAL;
774 if (!capable(CAP_SYS_ADMIN))
775 return -EPERM;
776
777 if (copy_from_user
778 (NodeName, argp, sizeof(NodeName_type)))
779 return -EFAULT;
780
781 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
782
783 /* Update the field, and then ring the doorbell */
784 for (i = 0; i < 16; i++)
785 writeb(NodeName[i],
786 &host->cfgtable->ServerName[i]);
787
788 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
789
790 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
791 if (!(readl(host->vaddr + SA5_DOORBELL)
792 & CFGTBL_ChangeReq))
793 break;
794 /* delay and try again */
795 udelay(1000);
796 }
797 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
798 if (i >= MAX_IOCTL_CONFIG_WAIT)
799 return -EAGAIN;
800 return 0;
801 }
802
803 case CCISS_GETHEARTBEAT:
804 {
805 Heartbeat_type heartbeat;
806
807 if (!arg)
808 return -EINVAL;
809 heartbeat = readl(&host->cfgtable->HeartBeat);
810 if (copy_to_user
811 (argp, &heartbeat, sizeof(Heartbeat_type)))
812 return -EFAULT;
813 return 0;
814 }
815 case CCISS_GETBUSTYPES:
816 {
817 BusTypes_type BusTypes;
818
819 if (!arg)
820 return -EINVAL;
821 BusTypes = readl(&host->cfgtable->BusTypes);
822 if (copy_to_user
823 (argp, &BusTypes, sizeof(BusTypes_type)))
824 return -EFAULT;
825 return 0;
826 }
827 case CCISS_GETFIRMVER:
828 {
829 FirmwareVer_type firmware;
830
831 if (!arg)
832 return -EINVAL;
833 memcpy(firmware, host->firm_ver, 4);
834
835 if (copy_to_user
836 (argp, firmware, sizeof(FirmwareVer_type)))
837 return -EFAULT;
838 return 0;
839 }
840 case CCISS_GETDRIVVER:
841 {
842 DriverVer_type DriverVer = DRIVER_VERSION;
843
844 if (!arg)
845 return -EINVAL;
846
847 if (copy_to_user
848 (argp, &DriverVer, sizeof(DriverVer_type)))
849 return -EFAULT;
850 return 0;
851 }
852
853 case CCISS_REVALIDVOLS:
854 if (bdev != bdev->bd_contains || drv != host->drv)
855 return -ENXIO;
856 return revalidate_allvol(host);
857
858 case CCISS_GETLUNINFO:{
859 LogvolInfo_struct luninfo;
860
861 luninfo.LunID = drv->LunID;
862 luninfo.num_opens = drv->usage_count;
863 luninfo.num_parts = 0;
864 if (copy_to_user(argp, &luninfo,
865 sizeof(LogvolInfo_struct)))
866 return -EFAULT;
867 return 0;
868 }
869 case CCISS_DEREGDISK:
870 return rebuild_lun_table(host, disk);
871
872 case CCISS_REGNEWD:
873 return rebuild_lun_table(host, NULL);
874
875 case CCISS_PASSTHRU:
876 {
877 IOCTL_Command_struct iocommand;
878 CommandList_struct *c;
879 char *buff = NULL;
880 u64bit temp64;
881 unsigned long flags;
882 DECLARE_COMPLETION(wait);
883
884 if (!arg)
885 return -EINVAL;
886
887 if (!capable(CAP_SYS_RAWIO))
888 return -EPERM;
889
890 if (copy_from_user
891 (&iocommand, argp, sizeof(IOCTL_Command_struct)))
892 return -EFAULT;
893 if ((iocommand.buf_size < 1) &&
894 (iocommand.Request.Type.Direction != XFER_NONE)) {
895 return -EINVAL;
896 }
897 #if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
898 /* Check kmalloc limits */
899 if (iocommand.buf_size > 128000)
900 return -EINVAL;
901 #endif
902 if (iocommand.buf_size > 0) {
903 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
904 if (buff == NULL)
905 return -EFAULT;
906 }
907 if (iocommand.Request.Type.Direction == XFER_WRITE) {
908 /* Copy the data into the buffer we created */
909 if (copy_from_user
910 (buff, iocommand.buf, iocommand.buf_size)) {
911 kfree(buff);
912 return -EFAULT;
913 }
914 } else {
915 memset(buff, 0, iocommand.buf_size);
916 }
917 if ((c = cmd_alloc(host, 0)) == NULL) {
918 kfree(buff);
919 return -ENOMEM;
920 }
921 // Fill in the command type
922 c->cmd_type = CMD_IOCTL_PEND;
923 // Fill in Command Header
924 c->Header.ReplyQueue = 0; // unused in simple mode
925 if (iocommand.buf_size > 0) // buffer to fill
926 {
927 c->Header.SGList = 1;
928 c->Header.SGTotal = 1;
929 } else // no buffers to fill
930 {
931 c->Header.SGList = 0;
932 c->Header.SGTotal = 0;
933 }
934 c->Header.LUN = iocommand.LUN_info;
935 c->Header.Tag.lower = c->busaddr; // use the bus address of the cmd block as the tag
936
937 // Fill in Request block
938 c->Request = iocommand.Request;
939
940 // Fill in the scatter gather information
941 if (iocommand.buf_size > 0) {
942 temp64.val = pci_map_single(host->pdev, buff,
943 iocommand.buf_size,
944 PCI_DMA_BIDIRECTIONAL);
945 c->SG[0].Addr.lower = temp64.val32.lower;
946 c->SG[0].Addr.upper = temp64.val32.upper;
947 c->SG[0].Len = iocommand.buf_size;
948 c->SG[0].Ext = 0; // we are not chaining
949 }
950 c->waiting = &wait;
951
952 /* Put the request on the tail of the request queue */
953 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
954 addQ(&host->reqQ, c);
955 host->Qdepth++;
956 start_io(host);
957 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
958
959 wait_for_completion(&wait);
960
961 /* unlock the buffers from DMA */
962 temp64.val32.lower = c->SG[0].Addr.lower;
963 temp64.val32.upper = c->SG[0].Addr.upper;
964 pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
965 iocommand.buf_size,
966 PCI_DMA_BIDIRECTIONAL);
967
968 /* Copy the error information out */
969 iocommand.error_info = *(c->err_info);
970 if (copy_to_user
971 (argp, &iocommand, sizeof(IOCTL_Command_struct))) {
972 kfree(buff);
973 cmd_free(host, c, 0);
974 return -EFAULT;
975 }
976
977 if (iocommand.Request.Type.Direction == XFER_READ) {
978 /* Copy the data out of the buffer we created */
979 if (copy_to_user
980 (iocommand.buf, buff, iocommand.buf_size)) {
981 kfree(buff);
982 cmd_free(host, c, 0);
983 return -EFAULT;
984 }
985 }
986 kfree(buff);
987 cmd_free(host, c, 0);
988 return 0;
989 }
990 case CCISS_BIG_PASSTHRU:{
991 BIG_IOCTL_Command_struct *ioc;
992 CommandList_struct *c;
993 unsigned char **buff = NULL;
994 int *buff_size = NULL;
995 u64bit temp64;
996 unsigned long flags;
997 BYTE sg_used = 0;
998 int status = 0;
999 int i;
1000 DECLARE_COMPLETION(wait);
1001 __u32 left;
1002 __u32 sz;
1003 BYTE __user *data_ptr;
1004
1005 if (!arg)
1006 return -EINVAL;
1007 if (!capable(CAP_SYS_RAWIO))
1008 return -EPERM;
1009 ioc = (BIG_IOCTL_Command_struct *)
1010 kmalloc(sizeof(*ioc), GFP_KERNEL);
1011 if (!ioc) {
1012 status = -ENOMEM;
1013 goto cleanup1;
1014 }
1015 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
1016 status = -EFAULT;
1017 goto cleanup1;
1018 }
1019 if ((ioc->buf_size < 1) &&
1020 (ioc->Request.Type.Direction != XFER_NONE)) {
1021 status = -EINVAL;
1022 goto cleanup1;
1023 }
1024 /* Check kmalloc limits using all SGs */
1025 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
1026 status = -EINVAL;
1027 goto cleanup1;
1028 }
1029 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
1030 status = -EINVAL;
1031 goto cleanup1;
1032 }
1033 buff =
1034 kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
1035 if (!buff) {
1036 status = -ENOMEM;
1037 goto cleanup1;
1038 }
1039 buff_size = (int *)kmalloc(MAXSGENTRIES * sizeof(int),
1040 GFP_KERNEL);
1041 if (!buff_size) {
1042 status = -ENOMEM;
1043 goto cleanup1;
1044 }
1045 left = ioc->buf_size;
1046 data_ptr = ioc->buf;
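/* Split the user buffer into chunks of at most ioc->malloc_size bytes,
 * one kernel buffer per scatter-gather entry; buf_size was checked above
 * to fit within MAXSGENTRIES such chunks. */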
1047 while (left) {
1048 sz = (left >
1049 ioc->malloc_size) ? ioc->
1050 malloc_size : left;
1051 buff_size[sg_used] = sz;
1052 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
1053 if (buff[sg_used] == NULL) {
1054 status = -ENOMEM;
1055 goto cleanup1;
1056 }
1057 if (ioc->Request.Type.Direction == XFER_WRITE) {
1058 if (copy_from_user
1059 (buff[sg_used], data_ptr, sz)) {
1060 status = -ENOMEM;
1061 goto cleanup1;
1062 }
1063 } else {
1064 memset(buff[sg_used], 0, sz);
1065 }
1066 left -= sz;
1067 data_ptr += sz;
1068 sg_used++;
1069 }
1070 if ((c = cmd_alloc(host, 0)) == NULL) {
1071 status = -ENOMEM;
1072 goto cleanup1;
1073 }
1074 c->cmd_type = CMD_IOCTL_PEND;
1075 c->Header.ReplyQueue = 0;
1076
1077 if (ioc->buf_size > 0) {
1078 c->Header.SGList = sg_used;
1079 c->Header.SGTotal = sg_used;
1080 } else {
1081 c->Header.SGList = 0;
1082 c->Header.SGTotal = 0;
1083 }
1084 c->Header.LUN = ioc->LUN_info;
1085 c->Header.Tag.lower = c->busaddr;
1086
1087 c->Request = ioc->Request;
1088 if (ioc->buf_size > 0) {
1089 int i;
1090 for (i = 0; i < sg_used; i++) {
1091 temp64.val =
1092 pci_map_single(host->pdev, buff[i],
1093 buff_size[i],
1094 PCI_DMA_BIDIRECTIONAL);
1095 c->SG[i].Addr.lower =
1096 temp64.val32.lower;
1097 c->SG[i].Addr.upper =
1098 temp64.val32.upper;
1099 c->SG[i].Len = buff_size[i];
1100 c->SG[i].Ext = 0; /* we are not chaining */
1101 }
1102 }
1103 c->waiting = &wait;
1104 /* Put the request on the tail of the request queue */
1105 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1106 addQ(&host->reqQ, c);
1107 host->Qdepth++;
1108 start_io(host);
1109 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1110 wait_for_completion(&wait);
1111 /* unlock the buffers from DMA */
1112 for (i = 0; i < sg_used; i++) {
1113 temp64.val32.lower = c->SG[i].Addr.lower;
1114 temp64.val32.upper = c->SG[i].Addr.upper;
1115 pci_unmap_single(host->pdev,
1116 (dma_addr_t) temp64.val, buff_size[i],
1117 PCI_DMA_BIDIRECTIONAL);
1118 }
1119 /* Copy the error information out */
1120 ioc->error_info = *(c->err_info);
1121 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
1122 cmd_free(host, c, 0);
1123 status = -EFAULT;
1124 goto cleanup1;
1125 }
1126 if (ioc->Request.Type.Direction == XFER_READ) {
1127 /* Copy the data out of the buffer we created */
1128 BYTE __user *ptr = ioc->buf;
1129 for (i = 0; i < sg_used; i++) {
1130 if (copy_to_user
1131 (ptr, buff[i], buff_size[i])) {
1132 cmd_free(host, c, 0);
1133 status = -EFAULT;
1134 goto cleanup1;
1135 }
1136 ptr += buff_size[i];
1137 }
1138 }
1139 cmd_free(host, c, 0);
1140 status = 0;
1141 cleanup1:
1142 if (buff) {
1143 for (i = 0; i < sg_used; i++)
1144 kfree(buff[i]);
1145 kfree(buff);
1146 }
1147 kfree(buff_size);
1148 kfree(ioc);
1149 return status;
1150 }
1151 default:
1152 return -ENOTTY;
1153 }
1154 }
1155
1156 /*
1157 * revalidate_allvol is for online array config utilities. After a
1158 * utility reconfigures the drives in the array, it can use this function
1159 * (through an ioctl) to make the driver zap any previous disk structs for
1160 * that controller and get new ones.
1161 *
1162 * Right now I'm using the getgeometry() function to do this, but this
1163 * function should probably be finer grained and allow you to revalidate one
1164 * particular logical volume (instead of all of them on a particular
1165 * controller).
1166 */
1167 static int revalidate_allvol(ctlr_info_t *host)
1168 {
1169 int ctlr = host->ctlr, i;
1170 unsigned long flags;
1171
1172 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1173 if (host->usage_count > 1) {
1174 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1175 printk(KERN_WARNING "cciss: Device busy for volume"
1176 " revalidation (usage=%d)\n", host->usage_count);
1177 return -EBUSY;
1178 }
1179 host->usage_count++;
1180 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1181
1182 for (i = 0; i < NWD; i++) {
1183 struct gendisk *disk = host->gendisk[i];
1184 if (disk) {
1185 request_queue_t *q = disk->queue;
1186
1187 if (disk->flags & GENHD_FL_UP)
1188 del_gendisk(disk);
1189 if (q)
1190 blk_cleanup_queue(q);
1191 }
1192 }
1193
1194 /*
1195 * Set the partition and block size structures for all volumes
1196 * on this controller to zero. We will reread all of this data
1197 */
1198 memset(host->drv, 0, sizeof(drive_info_struct)
1199 * CISS_MAX_LUN);
1200 /*
1201 * Tell the array controller not to give us any interrupts while
1202 * we check the new geometry. Then turn interrupts back on when
1203 * we're done.
1204 */
1205 host->access.set_intr_mask(host, CCISS_INTR_OFF);
1206 cciss_getgeometry(ctlr);
1207 host->access.set_intr_mask(host, CCISS_INTR_ON);
1208
1209 /* Loop through each real device */
1210 for (i = 0; i < NWD; i++) {
1211 struct gendisk *disk = host->gendisk[i];
1212 drive_info_struct *drv = &(host->drv[i]);
1213 /* we must register the controller even if no disks exist */
1214 /* this is for the online array utilities */
1215 if (!drv->heads && i)
1216 continue;
1217 blk_queue_hardsect_size(drv->queue, drv->block_size);
1218 set_capacity(disk, drv->nr_blocks);
1219 add_disk(disk);
1220 }
1221 host->usage_count--;
1222 return 0;
1223 }
1224
1225 static inline void complete_buffers(struct bio *bio, int status)
1226 {
1227 while (bio) {
1228 struct bio *xbh = bio->bi_next;
1229 int nr_sectors = bio_sectors(bio);
1230
1231 bio->bi_next = NULL;
1232 bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO);
1233 bio = xbh;
1234 }
1235 }
1236
1237 static void cciss_check_queues(ctlr_info_t *h)
1238 {
1239 int start_queue = h->next_to_run;
1240 int i;
1241
1242 /* check to see if we have maxed out the number of commands that can
1243 * be placed on the queue. If so then exit. We do this check here
1244 * in case the interrupt we serviced was from an ioctl and did not
1245 * free any new commands.
1246 */
1247 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
1248 return;
1249
1250 /* We have room on the queue for more commands. Now we need to queue
1251 * them up. We will also keep track of the next queue to run so
1252 * that every queue gets a chance to be started first.
1253 */
1254 for (i = 0; i < h->highest_lun + 1; i++) {
1255 int curr_queue = (start_queue + i) % (h->highest_lun + 1);
1256 /* make sure the disk has been added and the drive is real
1257 * because this can be called from the middle of init_one.
1258 */
1259 if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
1260 continue;
1261 blk_start_queue(h->gendisk[curr_queue]->queue);
1262
1263 /* check to see if we have maxed out the number of commands
1264 * that can be placed on the queue.
1265 */
1266 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) {
1267 if (curr_queue == start_queue) {
1268 h->next_to_run =
1269 (start_queue + 1) % (h->highest_lun + 1);
1270 break;
1271 } else {
1272 h->next_to_run = curr_queue;
1273 break;
1274 }
1275 } else {
1276 curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
1277 }
1278 }
1279 }
1280
1281 static void cciss_softirq_done(struct request *rq)
1282 {
1283 CommandList_struct *cmd = rq->completion_data;
1284 ctlr_info_t *h = hba[cmd->ctlr];
1285 unsigned long flags;
1286 u64bit temp64;
1287 int i, ddir;
1288
1289 if (cmd->Request.Type.Direction == XFER_READ)
1290 ddir = PCI_DMA_FROMDEVICE;
1291 else
1292 ddir = PCI_DMA_TODEVICE;
1293
1294 /* command did not need to be retried */
1295 /* unmap the DMA mapping for all the scatter gather elements */
1296 for (i = 0; i < cmd->Header.SGList; i++) {
1297 temp64.val32.lower = cmd->SG[i].Addr.lower;
1298 temp64.val32.upper = cmd->SG[i].Addr.upper;
1299 pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
1300 }
1301
1302 complete_buffers(rq->bio, rq->errors);
1303
1304 #ifdef CCISS_DEBUG
1305 printk("Done with %p\n", rq);
1306 #endif /* CCISS_DEBUG */
1307
1308 add_disk_randomness(rq->rq_disk);
1309 spin_lock_irqsave(&h->lock, flags);
1310 end_that_request_last(rq, rq->errors);
1311 cmd_free(h, cmd, 1);
1312 cciss_check_queues(h);
1313 spin_unlock_irqrestore(&h->lock, flags);
1314 }
1315
1316 /* This function will check the usage_count of the drive to be updated/added.
1317 * If the usage_count is zero then the drive information will be updated and
1318 * the disk will be re-registered with the kernel. If not then it will be
1319 * left alone for the next reboot. The exception to this is disk 0 which
1320 * will always be left registered with the kernel since it is also the
1321 * controller node. Any changes to disk 0 will show up on the next
1322 * reboot.
1323 */
1324 static void cciss_update_drive_info(int ctlr, int drv_index)
1325 {
1326 ctlr_info_t *h = hba[ctlr];
1327 struct gendisk *disk;
1328 ReadCapdata_struct *size_buff = NULL;
1329 InquiryData_struct *inq_buff = NULL;
1330 unsigned int block_size;
1331 unsigned int total_size;
1332 unsigned long flags = 0;
1333 int ret = 0;
1334
1335 /* if the disk already exists then deregister it before proceeding */
1336 if (h->drv[drv_index].raid_level != -1) {
1337 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1338 h->drv[drv_index].busy_configuring = 1;
1339 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1340 ret = deregister_disk(h->gendisk[drv_index],
1341 &h->drv[drv_index], 0);
1342 h->drv[drv_index].busy_configuring = 0;
1343 }
1344
1345 /* If the disk is in use return */
1346 if (ret)
1347 return;
1348
1349 /* Get information about the disk and modify the driver structure */
1350 size_buff = kmalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
1351 if (size_buff == NULL)
1352 goto mem_msg;
1353 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
1354 if (inq_buff == NULL)
1355 goto mem_msg;
1356
1357 cciss_read_capacity(ctlr, drv_index, size_buff, 1,
1358 &total_size, &block_size);
1359 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
1360 inq_buff, &h->drv[drv_index]);
1361
1362 ++h->num_luns;
1363 disk = h->gendisk[drv_index];
1364 set_capacity(disk, h->drv[drv_index].nr_blocks);
1365
1366 /* if it's the controller it's already added */
1367 if (drv_index) {
1368 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1369
1370 /* Set up queue information */
1371 disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
1372 blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
1373
1374 /* This is a hardware imposed limit. */
1375 blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
1376
1377 /* This is a limit in the driver and could be eliminated. */
1378 blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
1379
1380 blk_queue_max_sectors(disk->queue, 512);
1381
1382 blk_queue_softirq_done(disk->queue, cciss_softirq_done);
1383
1384 disk->queue->queuedata = hba[ctlr];
1385
1386 blk_queue_hardsect_size(disk->queue,
1387 hba[ctlr]->drv[drv_index].block_size);
1388
1389 h->drv[drv_index].queue = disk->queue;
1390 add_disk(disk);
1391 }
1392
1393 freeret:
1394 kfree(size_buff);
1395 kfree(inq_buff);
1396 return;
1397 mem_msg:
1398 printk(KERN_ERR "cciss: out of memory\n");
1399 goto freeret;
1400 }
1401
1402 /* This function will find the first index of the controllers drive array
1403 * that has a -1 for the raid_level and will return that index. This is
1404 * where new drives will be added. If the index to be returned is greater
1405 * than the highest_lun index for the controller then highest_lun is set
1406 * to this new index. If there are no available indexes then -1 is returned.
1407 */
1408 static int cciss_find_free_drive_index(int ctlr)
1409 {
1410 int i;
1411
1412 for (i = 0; i < CISS_MAX_LUN; i++) {
1413 if (hba[ctlr]->drv[i].raid_level == -1) {
1414 if (i > hba[ctlr]->highest_lun)
1415 hba[ctlr]->highest_lun = i;
1416 return i;
1417 }
1418 }
1419 return -1;
1420 }
1421
1422 /* This function will add and remove logical drives from the Logical
1423 * drive array of the controller and maintain persistence of ordering
1424 * so that mount points are preserved until the next reboot. This allows
1425 * for the removal of logical drives in the middle of the drive array
1426 * without a re-ordering of those drives.
1427 * INPUT
1428 * h = The controller to perform the operations on
1429 * del_disk = The disk to remove if specified. If the value given
1430 * is NULL then no disk is removed.
1431 */
1432 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1433 {
1434 int ctlr = h->ctlr;
1435 int num_luns;
1436 ReportLunData_struct *ld_buff = NULL;
1437 drive_info_struct *drv = NULL;
1438 int return_code;
1439 int listlength = 0;
1440 int i;
1441 int drv_found;
1442 int drv_index = 0;
1443 __u32 lunid = 0;
1444 unsigned long flags;
1445
1446 /* Set busy_configuring flag for this operation */
1447 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1448 if (h->num_luns >= CISS_MAX_LUN) {
1449 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1450 return -EINVAL;
1451 }
1452
1453 if (h->busy_configuring) {
1454 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1455 return -EBUSY;
1456 }
1457 h->busy_configuring = 1;
1458
1459 /* if del_disk is NULL then we are being called to add a new disk
1460 * and update the logical drive table. If it is not NULL then
1461 * we will check if the disk is in use or not.
1462 */
1463 if (del_disk != NULL) {
1464 drv = get_drv(del_disk);
1465 drv->busy_configuring = 1;
1466 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1467 return_code = deregister_disk(del_disk, drv, 1);
1468 drv->busy_configuring = 0;
1469 h->busy_configuring = 0;
1470 return return_code;
1471 } else {
1472 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1473 if (!capable(CAP_SYS_RAWIO))
1474 return -EPERM;
1475
1476 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1477 if (ld_buff == NULL)
1478 goto mem_msg;
1479
1480 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1481 sizeof(ReportLunData_struct), 0,
1482 0, 0, TYPE_CMD);
1483
1484 if (return_code == IO_OK) {
1485 listlength |=
1486 (0xff & (unsigned int)(ld_buff->LUNListLength[0]))
1487 << 24;
1488 listlength |=
1489 (0xff & (unsigned int)(ld_buff->LUNListLength[1]))
1490 << 16;
1491 listlength |=
1492 (0xff & (unsigned int)(ld_buff->LUNListLength[2]))
1493 << 8;
1494 listlength |=
1495 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
1496 } else { /* reading number of logical volumes failed */
1497 printk(KERN_WARNING "cciss: report logical volume"
1498 " command failed\n");
1499 listlength = 0;
1500 goto freeret;
1501 }
1502
1503 num_luns = listlength / 8; /* 8 bytes per entry */
1504 if (num_luns > CISS_MAX_LUN) {
1505 num_luns = CISS_MAX_LUN;
1506 printk(KERN_WARNING "cciss: more luns configured"
1507 " on controller than can be handled by"
1508 " this driver.\n");
1509 }
1510
1511 /* Compare controller drive array to drivers drive array.
1512 * Check for updates in the drive information and any new drives
1513 * on the controller.
1514 */
1515 for (i = 0; i < num_luns; i++) {
1516 int j;
1517
1518 drv_found = 0;
1519
1520 lunid = (0xff &
1521 (unsigned int)(ld_buff->LUN[i][3])) << 24;
1522 lunid |= (0xff &
1523 (unsigned int)(ld_buff->LUN[i][2])) << 16;
1524 lunid |= (0xff &
1525 (unsigned int)(ld_buff->LUN[i][1])) << 8;
1526 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
1527
1528 /* Find if the LUN is already in the drive array
1529 * of the controller. If so then update its info,
1530 * if it is not in use. If it does not exist then find
1531 * the first free index and add it.
1532 */
1533 for (j = 0; j <= h->highest_lun; j++) {
1534 if (h->drv[j].LunID == lunid) {
1535 drv_index = j;
1536 drv_found = 1;
1537 }
1538 }
1539
1540 /* check if the drive was found already in the array */
1541 if (!drv_found) {
1542 drv_index = cciss_find_free_drive_index(ctlr);
1543 if (drv_index == -1)
1544 goto freeret;
1545
1546 }
1547 h->drv[drv_index].LunID = lunid;
1548 cciss_update_drive_info(ctlr, drv_index);
1549 } /* end for */
1550 } /* end else */
1551
1552 freeret:
1553 kfree(ld_buff);
1554 h->busy_configuring = 0;
1555 /* We return -1 here to tell the ACU that we have registered/updated
1556 * all of the drives that we can and to keep it from calling us
1557 * additional times.
1558 */
1559 return -1;
1560 mem_msg:
1561 printk(KERN_ERR "cciss: out of memory\n");
1562 goto freeret;
1563 }
1564
1565 /* This function will deregister the disk and its queue from the
1566 * kernel. It must be called with the controller lock held and the
1567 * drv structure's busy_configuring flag set. Its parameters are:
1568 *
1569 * disk = This is the disk to be deregistered
1570 * drv = This is the drive_info_struct associated with the disk to be
1571 * deregistered. It contains information about the disk used
1572 * by the driver.
1573 * clear_all = This flag determines whether or not the disk information
1574 * is going to be completely cleared out and the highest_lun
1575 * reset. Sometimes we want to clear out information about
1576 * the disk in preparation for re-adding it. In this case
1577 * the highest_lun should be left unchanged and the LunID
1578 * should not be cleared.
1579 */
1580 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1581 int clear_all)
1582 {
1583 ctlr_info_t *h = get_host(disk);
1584
1585 if (!capable(CAP_SYS_RAWIO))
1586 return -EPERM;
1587
1588 /* make sure logical volume is NOT in use */
1589 if (clear_all || (h->gendisk[0] == disk)) {
1590 if (drv->usage_count > 1)
1591 return -EBUSY;
1592 } else if (drv->usage_count > 0)
1593 return -EBUSY;
1594
1595 /* invalidate the devices and deregister the disk. If it is disk
1596 * zero, do not deregister it but just zero out its values. This
1597 * allows us to delete disk zero but keep the controller registered.
1598 */
1599 if (h->gendisk[0] != disk) {
1600 if (disk) {
1601 request_queue_t *q = disk->queue;
1602 if (disk->flags & GENHD_FL_UP)
1603 del_gendisk(disk);
1604 if (q) {
1605 blk_cleanup_queue(q);
1606 drv->queue = NULL;
1607 }
1608 }
1609 }
1610
1611 --h->num_luns;
1612 /* zero out the disk size info */
1613 drv->nr_blocks = 0;
1614 drv->block_size = 0;
1615 drv->heads = 0;
1616 drv->sectors = 0;
1617 drv->cylinders = 0;
1618 drv->raid_level = -1; /* This can be used as a flag variable to
1619 * indicate that this element of the drive
1620 * array is free.
1621 */
1622
1623 if (clear_all) {
1624 /* check to see if it was the last disk */
1625 if (drv == h->drv + h->highest_lun) {
1626 /* if so, find the new highest lun */
1627 int i, newhighest = -1;
1628 for (i = 0; i < h->highest_lun; i++) {
1629 /* if the disk has size > 0, it is available */
1630 if (h->drv[i].heads)
1631 newhighest = i;
1632 }
1633 h->highest_lun = newhighest;
1634 }
1635
1636 drv->LunID = 0;
1637 }
1638 return 0;
1639 }
1640
1641 static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
1642 1: address logical volume log_unit,
1643 2: periph device address is scsi3addr */
1644 unsigned int log_unit, __u8 page_code,
1645 unsigned char *scsi3addr, int cmd_type)
1646 {
1647 ctlr_info_t *h = hba[ctlr];
1648 u64bit buff_dma_handle;
1649 int status = IO_OK;
1650
1651 c->cmd_type = CMD_IOCTL_PEND;
1652 c->Header.ReplyQueue = 0;
1653 if (buff != NULL) {
1654 c->Header.SGList = 1;
1655 c->Header.SGTotal = 1;
1656 } else {
1657 c->Header.SGList = 0;
1658 c->Header.SGTotal = 0;
1659 }
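/* Use the command block's own bus address as the tag so the completion
 * path can match the controller's reply back to this command. */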
1660 c->Header.Tag.lower = c->busaddr;
1661
1662 c->Request.Type.Type = cmd_type;
1663 if (cmd_type == TYPE_CMD) {
1664 switch (cmd) {
1665 case CISS_INQUIRY:
1666 /* If use_unit_num is 0 then this is going
1667 to the controller, so it's a physical command:
1668 mode = 0, target = 0. We have nothing to write.
1669 Otherwise, if use_unit_num == 1,
1670 mode = 1 (volume set addressing), target = LunID.
1671 Otherwise, if use_unit_num == 2,
1672 mode = 0 (peripheral device address), target = scsi3addr. */
1673 if (use_unit_num == 1) {
1674 c->Header.LUN.LogDev.VolId =
1675 h->drv[log_unit].LunID;
1676 c->Header.LUN.LogDev.Mode = 1;
1677 } else if (use_unit_num == 2) {
1678 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr,
1679 8);
1680 c->Header.LUN.LogDev.Mode = 0;
1681 }
1682 /* are we trying to read a vital product page */
1683 if (page_code != 0) {
1684 c->Request.CDB[1] = 0x01;
1685 c->Request.CDB[2] = page_code;
1686 }
1687 c->Request.CDBLen = 6;
1688 c->Request.Type.Attribute = ATTR_SIMPLE;
1689 c->Request.Type.Direction = XFER_READ;
1690 c->Request.Timeout = 0;
1691 c->Request.CDB[0] = CISS_INQUIRY;
1692 c->Request.CDB[4] = size & 0xFF;
1693 break;
1694 case CISS_REPORT_LOG:
1695 case CISS_REPORT_PHYS:
1696 /* Talking to the controller, so it's a physical command:
1697 mode = 00, target = 0. Nothing to write.
1698 */
1699 c->Request.CDBLen = 12;
1700 c->Request.Type.Attribute = ATTR_SIMPLE;
1701 c->Request.Type.Direction = XFER_READ;
1702 c->Request.Timeout = 0;
1703 c->Request.CDB[0] = cmd;
1704 c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
1705 c->Request.CDB[7] = (size >> 16) & 0xFF;
1706 c->Request.CDB[8] = (size >> 8) & 0xFF;
1707 c->Request.CDB[9] = size & 0xFF;
1708 break;
1709
1710 case CCISS_READ_CAPACITY:
1711 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1712 c->Header.LUN.LogDev.Mode = 1;
1713 c->Request.CDBLen = 10;
1714 c->Request.Type.Attribute = ATTR_SIMPLE;
1715 c->Request.Type.Direction = XFER_READ;
1716 c->Request.Timeout = 0;
1717 c->Request.CDB[0] = cmd;
1718 break;
1719 case CCISS_CACHE_FLUSH:
1720 c->Request.CDBLen = 12;
1721 c->Request.Type.Attribute = ATTR_SIMPLE;
1722 c->Request.Type.Direction = XFER_WRITE;
1723 c->Request.Timeout = 0;
1724 c->Request.CDB[0] = BMIC_WRITE;
1725 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
1726 break;
1727 default:
1728 printk(KERN_WARNING
1729 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
1730 return IO_ERROR;
1731 }
1732 } else if (cmd_type == TYPE_MSG) {
1733 switch (cmd) {
1734 case 0: /* ABORT message */
1735 c->Request.CDBLen = 12;
1736 c->Request.Type.Attribute = ATTR_SIMPLE;
1737 c->Request.Type.Direction = XFER_WRITE;
1738 c->Request.Timeout = 0;
1739 c->Request.CDB[0] = cmd; /* abort */
1740 c->Request.CDB[1] = 0; /* abort a command */
1741 /* buff contains the tag of the command to abort */
1742 memcpy(&c->Request.CDB[4], buff, 8);
1743 break;
1744 case 1: /* RESET message */
1745 c->Request.CDBLen = 12;
1746 c->Request.Type.Attribute = ATTR_SIMPLE;
1747 c->Request.Type.Direction = XFER_WRITE;
1748 c->Request.Timeout = 0;
1749 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
1750 c->Request.CDB[0] = cmd; /* reset */
1751 c->Request.CDB[1] = 0x04; /* reset a LUN */
break; /* don't fall through and clobber the RESET CDB */
1752 case 3: /* No-Op message */
1753 c->Request.CDBLen = 1;
1754 c->Request.Type.Attribute = ATTR_SIMPLE;
1755 c->Request.Type.Direction = XFER_WRITE;
1756 c->Request.Timeout = 0;
1757 c->Request.CDB[0] = cmd;
1758 break;
1759 default:
1760 printk(KERN_WARNING
1761 "cciss%d: unknown message type %d\n", ctlr, cmd);
1762 return IO_ERROR;
1763 }
1764 } else {
1765 printk(KERN_WARNING
1766 "cciss%d: unknown command type %d\n", ctlr, cmd_type);
1767 return IO_ERROR;
1768 }
1769 /* Fill in the scatter gather information */
1770 if (size > 0) {
1771 buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
1772 buff, size,
1773 PCI_DMA_BIDIRECTIONAL);
1774 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
1775 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
1776 c->SG[0].Len = size;
1777 c->SG[0].Ext = 0; /* we are not chaining */
1778 }
1779 return status;
1780 }
1781
1782 static int sendcmd_withirq(__u8 cmd,
1783 int ctlr,
1784 void *buff,
1785 size_t size,
1786 unsigned int use_unit_num,
1787 unsigned int log_unit, __u8 page_code, int cmd_type)
1788 {
1789 ctlr_info_t *h = hba[ctlr];
1790 CommandList_struct *c;
1791 u64bit buff_dma_handle;
1792 unsigned long flags;
1793 int return_status;
1794 DECLARE_COMPLETION(wait);
1795
1796 if ((c = cmd_alloc(h, 0)) == NULL)
1797 return -ENOMEM;
1798 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1799 log_unit, page_code, NULL, cmd_type);
1800 if (return_status != IO_OK) {
1801 cmd_free(h, c, 0);
1802 return return_status;
1803 }
1804 resend_cmd2:
1805 c->waiting = &wait;
1806
1807 /* Put the request on the tail of the queue and send it */
1808 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1809 addQ(&h->reqQ, c);
1810 h->Qdepth++;
1811 start_io(h);
1812 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1813
1814 wait_for_completion(&wait);
1815
1816 if (c->err_info->CommandStatus != 0) { /* an error has occurred */
1817 switch (c->err_info->CommandStatus) {
1818 case CMD_TARGET_STATUS:
1819 printk(KERN_WARNING "cciss: cmd %p has "
1820 " completed with errors\n", c);
1821 if (c->err_info->ScsiStatus) {
1822 printk(KERN_WARNING "cciss: cmd %p "
1823 "has SCSI Status = %x\n",
1824 c, c->err_info->ScsiStatus);
1825 }
1826
1827 break;
1828 case CMD_DATA_UNDERRUN:
1829 case CMD_DATA_OVERRUN:
1830 /* expected for inquire and report lun commands */
1831 break;
1832 case CMD_INVALID:
1833 printk(KERN_WARNING "cciss: Cmd %p is "
1834 "reported invalid\n", c);
1835 return_status = IO_ERROR;
1836 break;
1837 case CMD_PROTOCOL_ERR:
1838 printk(KERN_WARNING "cciss: cmd %p has "
1839 "protocol error \n", c);
1840 return_status = IO_ERROR;
1841 break;
1842 case CMD_HARDWARE_ERR:
1843 printk(KERN_WARNING "cciss: cmd %p had "
1844 " hardware error\n", c);
1845 return_status = IO_ERROR;
1846 break;
1847 case CMD_CONNECTION_LOST:
1848 printk(KERN_WARNING "cciss: cmd %p had "
1849 "connection lost\n", c);
1850 return_status = IO_ERROR;
1851 break;
1852 case CMD_ABORTED:
1853 printk(KERN_WARNING "cciss: cmd %p was "
1854 "aborted\n", c);
1855 return_status = IO_ERROR;
1856 break;
1857 case CMD_ABORT_FAILED:
1858 printk(KERN_WARNING "cciss: cmd %p reports "
1859 "abort failed\n", c);
1860 return_status = IO_ERROR;
1861 break;
1862 case CMD_UNSOLICITED_ABORT:
1863 printk(KERN_WARNING
1864 "cciss%d: unsolicited abort %p\n", ctlr, c);
1865 if (c->retry_count < MAX_CMD_RETRIES) {
1866 printk(KERN_WARNING
1867 "cciss%d: retrying %p\n", ctlr, c);
1868 c->retry_count++;
1869 /* erase the old error information */
1870 memset(c->err_info, 0,
1871 sizeof(ErrorInfo_struct));
1872 return_status = IO_OK;
1873 INIT_COMPLETION(wait);
1874 goto resend_cmd2;
1875 }
1876 return_status = IO_ERROR;
1877 break;
1878 default:
1879 printk(KERN_WARNING "cciss: cmd %p returned "
1880 "unknown status %x\n", c,
1881 c->err_info->CommandStatus);
1882 return_status = IO_ERROR;
1883 }
1884 }
1885 /* unlock the buffers from DMA */
1886 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
1887 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
1888 pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
1889 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
1890 cmd_free(h, c, 0);
1891 return return_status;
1892 }
1893
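/*
 * Issue a vendor-specific Inquiry (page 0xC1) to fetch the logical drive's
 * geometry (heads/sectors/cylinders) and RAID level, and fill in the
 * drive_info_struct accordingly.
 */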
1894 static void cciss_geometry_inquiry(int ctlr, int logvol,
1895 int withirq, unsigned int total_size,
1896 unsigned int block_size,
1897 InquiryData_struct *inq_buff,
1898 drive_info_struct *drv)
1899 {
1900 int return_code;
1901 memset(inq_buff, 0, sizeof(InquiryData_struct));
1902 if (withirq)
1903 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
1904 inq_buff, sizeof(*inq_buff), 1,
1905 logvol, 0xC1, TYPE_CMD);
1906 else
1907 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
1908 sizeof(*inq_buff), 1, logvol, 0xC1, NULL,
1909 TYPE_CMD);
1910 if (return_code == IO_OK) {
1911 if (inq_buff->data_byte[8] == 0xFF) {
1912 printk(KERN_WARNING
1913 "cciss: reading geometry failed, volume "
1914 "does not support reading geometry\n");
1915 drv->block_size = block_size;
1916 drv->nr_blocks = total_size;
1917 drv->heads = 255;
1918 drv->sectors = 32; // Sectors per track
1919 drv->cylinders = total_size / 255 / 32;
1920 } else {
1921 unsigned int t;
1922
1923 drv->block_size = block_size;
1924 drv->nr_blocks = total_size;
1925 drv->heads = inq_buff->data_byte[6];
1926 drv->sectors = inq_buff->data_byte[7];
1927 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
1928 drv->cylinders += inq_buff->data_byte[5];
1929 drv->raid_level = inq_buff->data_byte[8];
1930 t = drv->heads * drv->sectors;
1931 if (t > 1) {
1932 drv->cylinders = total_size / t;
1933 }
1934 }
1935 } else { /* Get geometry failed */
1936 printk(KERN_WARNING "cciss: reading geometry failed\n");
1937 }
1938 printk(KERN_INFO " heads= %d, sectors= %d, cylinders= %d\n\n",
1939 drv->heads, drv->sectors, drv->cylinders);
1940 }
1941
1942 static void
1943 cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf,
1944 int withirq, unsigned int *total_size,
1945 unsigned int *block_size)
1946 {
1947 int return_code;
1948 memset(buf, 0, sizeof(*buf));
1949 if (withirq)
1950 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
1951 ctlr, buf, sizeof(*buf), 1,
1952 logvol, 0, TYPE_CMD);
1953 else
1954 return_code = sendcmd(CCISS_READ_CAPACITY,
1955 ctlr, buf, sizeof(*buf), 1, logvol, 0,
1956 NULL, TYPE_CMD);
1957 if (return_code == IO_OK) {
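		/* READ CAPACITY returns the address of the last block,
		 * so add 1 to get the total number of blocks. */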
1958 *total_size =
1959 be32_to_cpu(*((__be32 *) & buf->total_size[0])) + 1;
1960 *block_size = be32_to_cpu(*((__be32 *) & buf->block_size[0]));
1961 } else { /* read capacity command failed */
1962 printk(KERN_WARNING "cciss: read capacity failed\n");
1963 *total_size = 0;
1964 *block_size = BLOCK_SIZE;
1965 }
1966 printk(KERN_INFO " blocks= %u block_size= %d\n",
1967 *total_size, *block_size);
1968 return;
1969 }
1970
1971 static int cciss_revalidate(struct gendisk *disk)
1972 {
1973 ctlr_info_t *h = get_host(disk);
1974 drive_info_struct *drv = get_drv(disk);
1975 int logvol;
1976 int FOUND = 0;
1977 unsigned int block_size;
1978 unsigned int total_size;
1979 ReadCapdata_struct *size_buff = NULL;
1980 InquiryData_struct *inq_buff = NULL;
1981
1982 for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
1983 if (h->drv[logvol].LunID == drv->LunID) {
1984 FOUND = 1;
1985 break;
1986 }
1987 }
1988
1989 if (!FOUND)
1990 return 1;
1991
1992 size_buff = kmalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
1993 if (size_buff == NULL) {
1994 printk(KERN_WARNING "cciss: out of memory\n");
1995 return 1;
1996 }
1997 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
1998 if (inq_buff == NULL) {
1999 printk(KERN_WARNING "cciss: out of memory\n");
2000 kfree(size_buff);
2001 return 1;
2002 }
2003
2004 cciss_read_capacity(h->ctlr, logvol, size_buff, 1, &total_size,
2005 &block_size);
2006 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
2007 inq_buff, drv);
2008
2009 blk_queue_hardsect_size(drv->queue, drv->block_size);
2010 set_capacity(disk, drv->nr_blocks);
2011
2012 kfree(size_buff);
2013 kfree(inq_buff);
2014 return 0;
2015 }
2016
2017 /*
2018 * Wait polling for a command to complete.
2019 * The memory mapped FIFO is polled for the completion.
2020 * Used only at init time, interrupts from the HBA are disabled.
2021 */
2022 static unsigned long pollcomplete(int ctlr)
2023 {
2024 unsigned long done;
2025 int i;
2026
2027 /* Wait (up to 20 seconds) for a command to complete */
2028
2029 for (i = 20 * HZ; i > 0; i--) {
2030 done = hba[ctlr]->access.command_completed(hba[ctlr]);
2031 if (done == FIFO_EMPTY)
2032 schedule_timeout_uninterruptible(1);
2033 else
2034 return done;
2035 }
2036 /* Invalid address to tell caller we ran out of time */
2037 return 1;
2038 }
2039
2040 static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
2041 {
2042 /* We get in here if sendcmd() is polling for completions
2043 and gets some command back that it wasn't expecting --
2044 something other than that which it just sent down.
2045 Ordinarily, that shouldn't happen, but it can happen when
2046 the scsi tape stuff gets into error handling mode, and
2047 starts using sendcmd() to try to abort commands and
2048 reset tape drives. In that case, sendcmd may pick up
2049 completions of commands that were sent to logical drives
2050 through the block i/o system, or cciss ioctls completing, etc.
2051 In that case, we need to save those completions for later
2052 processing by the interrupt handler.
2053 */
2054
2055 #ifdef CONFIG_CISS_SCSI_TAPE
2056 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
2057
2058 /* If it's not the scsi tape stuff doing error handling, (abort */
2059 /* or reset) then we don't expect anything weird. */
2060 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
2061 #endif
2062 printk(KERN_WARNING "cciss cciss%d: SendCmd "
2063 "Invalid command list address returned! (%lx)\n",
2064 ctlr, complete);
2065 /* not much we can do. */
2066 #ifdef CONFIG_CISS_SCSI_TAPE
2067 return 1;
2068 }
2069
2070 /* We've sent down an abort or reset, but something else
2071 has completed */
2072 if (srl->ncompletions >= (NR_CMDS + 2)) {
2073 /* Uh oh. No room to save it for later... */
2074 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
2075 "reject list overflow, command lost!\n", ctlr);
2076 return 1;
2077 }
2078 /* Save it for later */
2079 srl->complete[srl->ncompletions] = complete;
2080 srl->ncompletions++;
2081 #endif
2082 return 0;
2083 }
2084
2085 /*
2086 * Send a command to the controller, and wait for it to complete.
2087 * Only used at init time.
2088 */
2089 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
2090 1: address logical volume log_unit,
2091 2: periph device address is scsi3addr */
2092 unsigned int log_unit,
2093 __u8 page_code, unsigned char *scsi3addr, int cmd_type)
2094 {
2095 CommandList_struct *c;
2096 int i;
2097 unsigned long complete;
2098 ctlr_info_t *info_p = hba[ctlr];
2099 u64bit buff_dma_handle;
2100 int status, done = 0;
2101
2102 if ((c = cmd_alloc(info_p, 1)) == NULL) {
2103 printk(KERN_WARNING "cciss: unable to get memory");
2104 return IO_ERROR;
2105 }
2106 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
2107 log_unit, page_code, scsi3addr, cmd_type);
2108 if (status != IO_OK) {
2109 cmd_free(info_p, c, 1);
2110 return status;
2111 }
2112 resend_cmd1:
2113 /*
2114 * Disable interrupt
2115 */
2116 #ifdef CCISS_DEBUG
2117 printk(KERN_DEBUG "cciss: turning intr off\n");
2118 #endif /* CCISS_DEBUG */
2119 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
2120
2121 /* Make sure there is room in the command FIFO */
2122 /* Actually it should be completely empty at this time */
2123 /* unless we are in here doing error handling for the scsi */
2124 /* tape side of the driver. */
2125 for (i = 200000; i > 0; i--) {
2126 /* if fifo isn't full go */
2127 if (!(info_p->access.fifo_full(info_p))) {
2128
2129 break;
2130 }
2131 udelay(10);
2132 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
2133 " waiting!\n", ctlr);
2134 }
2135 /*
2136 * Send the cmd
2137 */
2138 info_p->access.submit_command(info_p, c);
2139 done = 0;
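	/* Poll for completions; anything that completes but is not the
	 * command we just sent is saved off via add_sendcmd_reject(). */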
2140 do {
2141 complete = pollcomplete(ctlr);
2142
2143 #ifdef CCISS_DEBUG
2144 printk(KERN_DEBUG "cciss: command completed\n");
2145 #endif /* CCISS_DEBUG */
2146
2147 if (complete == 1) {
2148 printk(KERN_WARNING
2149 "cciss cciss%d: SendCmd timed out, "
2150 "No command list address returned!\n", ctlr);
2151 status = IO_ERROR;
2152 done = 1;
2153 break;
2154 }
2155
2156 /* This will need to change for direct lookup completions */
2157 if ((complete & CISS_ERROR_BIT)
2158 && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
2159 /* if data overrun or underrun on Report command,
2160 ignore it
2161 */
2162 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2163 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2164 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2165 ((c->err_info->CommandStatus ==
2166 CMD_DATA_OVERRUN) ||
2167 (c->err_info->CommandStatus == CMD_DATA_UNDERRUN)
2168 )) {
2169 complete = c->busaddr;
2170 } else {
2171 if (c->err_info->CommandStatus ==
2172 CMD_UNSOLICITED_ABORT) {
2173 printk(KERN_WARNING "cciss%d: "
2174 "unsolicited abort %p\n",
2175 ctlr, c);
2176 if (c->retry_count < MAX_CMD_RETRIES) {
2177 printk(KERN_WARNING
2178 "cciss%d: retrying %p\n",
2179 ctlr, c);
2180 c->retry_count++;
2181 /* erase the old error */
2182 /* information */
2183 memset(c->err_info, 0,
2184 sizeof
2185 (ErrorInfo_struct));
2186 goto resend_cmd1;
2187 } else {
2188 printk(KERN_WARNING
2189 "cciss%d: retried %p too "
2190 "many times\n", ctlr, c);
2191 status = IO_ERROR;
2192 goto cleanup1;
2193 }
2194 } else if (c->err_info->CommandStatus ==
2195 CMD_UNABORTABLE) {
2196 printk(KERN_WARNING
2197 "cciss%d: command could not be aborted.\n",
2198 ctlr);
2199 status = IO_ERROR;
2200 goto cleanup1;
2201 }
2202 printk(KERN_WARNING "cciss cciss%d: sendcmd"
2203 " Error %x\n", ctlr,
2204 c->err_info->CommandStatus);
2205 printk(KERN_WARNING "cciss cciss%d: sendcmd"
2206 " offensive info\n"
2207 " size %x\n num %x value %x\n",
2208 ctlr,
2209 c->err_info->MoreErrInfo.Invalid_Cmd.
2210 offense_size,
2211 c->err_info->MoreErrInfo.Invalid_Cmd.
2212 offense_num,
2213 c->err_info->MoreErrInfo.Invalid_Cmd.
2214 offense_value);
2215 status = IO_ERROR;
2216 goto cleanup1;
2217 }
2218 }
2219 /* This will need changing for direct lookup completions */
2220 if (complete != c->busaddr) {
2221 if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
2222 BUG(); /* we are pretty much hosed if we get here. */
2223 }
2224 continue;
2225 } else
2226 done = 1;
2227 } while (!done);
2228
2229 cleanup1:
2230 /* unlock the data buffer from DMA */
2231 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2232 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2233 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
2234 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2235 #ifdef CONFIG_CISS_SCSI_TAPE
2236 /* if we saved some commands for later, process them now. */
2237 if (info_p->scsi_rejects.ncompletions > 0)
2238 do_cciss_intr(0, info_p, NULL);
2239 #endif
2240 cmd_free(info_p, c, 1);
2241 return status;
2242 }
2243
2244 /*
2245 * Map (physical) PCI mem into (virtual) kernel space
2246 */
2247 static void __iomem *remap_pci_mem(ulong base, ulong size)
2248 {
2249 ulong page_base = ((ulong) base) & PAGE_MASK;
2250 ulong page_offs = ((ulong) base) - page_base;
2251 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2252
2253 return page_remapped ? (page_remapped + page_offs) : NULL;
2254 }
2255
2256 /*
2257 * Takes jobs off the request queue and sends them to the hardware, then
2258 * puts them on the completion queue to wait for completion.
2259 */
2260 static void start_io(ctlr_info_t *h)
2261 {
2262 CommandList_struct *c;
2263
2264 while ((c = h->reqQ) != NULL) {
2265 /* can't do anything if fifo is full */
2266 if ((h->access.fifo_full(h))) {
2267 printk(KERN_WARNING "cciss: fifo full\n");
2268 break;
2269 }
2270
2271 /* Get the first entry from the Request Q */
2272 removeQ(&(h->reqQ), c);
2273 h->Qdepth--;
2274
2275 /* Tell the controller execute command */
2276 h->access.submit_command(h, c);
2277
2278 /* Put job onto the completed Q */
2279 addQ(&(h->cmpQ), c);
2280 }
2281 }
2282
2283 /* Assumes that CCISS_LOCK(h->ctlr) is held. */
2284 /* Zeros out the error record and then resends the command back */
2285 /* to the controller */
2286 static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
2287 {
2288 /* erase the old error information */
2289 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2290
2291 /* add it to software queue and then send it to the controller */
2292 addQ(&(h->reqQ), c);
2293 h->Qdepth++;
2294 if (h->Qdepth > h->maxQsinceinit)
2295 h->maxQsinceinit = h->Qdepth;
2296
2297 start_io(h);
2298 }
2299
2300 /* checks the status of the job and calls complete buffers to mark all
2301 * buffers for the completed job. Note that this function does not need
2302 * to hold the hba/queue lock.
2303 */
2304 static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
2305 int timeout)
2306 {
2307 int status = 1;
2308 int retry_cmd = 0;
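	/* status starts out as 1 (success); any failure below clears it.
	 * It is handed back to the block layer via rq->errors at the end. */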
2309
2310 if (timeout)
2311 status = 0;
2312
2313 if (cmd->err_info->CommandStatus != 0) { /* an error has occurred */
2314 switch (cmd->err_info->CommandStatus) {
2315 unsigned char sense_key;
2316 case CMD_TARGET_STATUS:
2317 status = 0;
2318
2319 if (cmd->err_info->ScsiStatus == 0x02) {
2320 printk(KERN_WARNING "cciss: cmd %p "
2321 "has CHECK CONDITION "
2322 "byte 2 = 0x%x\n", cmd,
2323 cmd->err_info->SenseInfo[2]
2324 );
2325 /* check the sense key */
2326 sense_key = 0xf & cmd->err_info->SenseInfo[2];
2327 /* no status or recovered error */
2328 if ((sense_key == 0x0) || (sense_key == 0x1)) {
2329 status = 1;
2330 }
2331 } else {
2332 printk(KERN_WARNING "cciss: cmd %p "
2333 "has SCSI Status 0x%x\n",
2334 cmd, cmd->err_info->ScsiStatus);
2335 }
2336 break;
2337 case CMD_DATA_UNDERRUN:
2338 printk(KERN_WARNING "cciss: cmd %p has"
2339 " completed with data underrun "
2340 "reported\n", cmd);
2341 break;
2342 case CMD_DATA_OVERRUN:
2343 printk(KERN_WARNING "cciss: cmd %p has"
2344 " completed with data overrun "
2345 "reported\n", cmd);
2346 break;
2347 case CMD_INVALID:
2348 printk(KERN_WARNING "cciss: cmd %p is "
2349 "reported invalid\n", cmd);
2350 status = 0;
2351 break;
2352 case CMD_PROTOCOL_ERR:
2353 printk(KERN_WARNING "cciss: cmd %p has "
2354 "protocol error \n", cmd);
2355 status = 0;
2356 break;
2357 case CMD_HARDWARE_ERR:
2358 printk(KERN_WARNING "cciss: cmd %p had "
2359 "hardware error\n", cmd);
2360 status = 0;
2361 break;
2362 case CMD_CONNECTION_LOST:
2363 printk(KERN_WARNING "cciss: cmd %p had "
2364 "connection lost\n", cmd);
2365 status = 0;
2366 break;
2367 case CMD_ABORTED:
2368 printk(KERN_WARNING "cciss: cmd %p was "
2369 "aborted\n", cmd);
2370 status = 0;
2371 break;
2372 case CMD_ABORT_FAILED:
2373 printk(KERN_WARNING "cciss: cmd %p reports "
2374 "abort failed\n", cmd);
2375 status = 0;
2376 break;
2377 case CMD_UNSOLICITED_ABORT:
2378 printk(KERN_WARNING "cciss%d: unsolicited "
2379 "abort %p\n", h->ctlr, cmd);
2380 if (cmd->retry_count < MAX_CMD_RETRIES) {
2381 retry_cmd = 1;
2382 printk(KERN_WARNING
2383 "cciss%d: retrying %p\n", h->ctlr, cmd);
2384 cmd->retry_count++;
2385 } else
2386 printk(KERN_WARNING
2387 "cciss%d: %p retried too "
2388 "many times\n", h->ctlr, cmd);
2389 status = 0;
2390 break;
2391 case CMD_TIMEOUT:
2392 printk(KERN_WARNING "cciss: cmd %p timed out\n", cmd);
2393 status = 0;
2394 break;
2395 default:
2396 printk(KERN_WARNING "cciss: cmd %p returned "
2397 "unknown status %x\n", cmd,
2398 cmd->err_info->CommandStatus);
2399 status = 0;
2400 }
2401 }
2402 /* We need to return this command */
2403 if (retry_cmd) {
2404 resend_cciss_cmd(h, cmd);
2405 return;
2406 }
2407
2408 cmd->rq->completion_data = cmd;
2409 cmd->rq->errors = status;
2410 blk_add_trace_rq(cmd->rq->q, cmd->rq, BLK_TA_COMPLETE);
2411 blk_complete_request(cmd->rq);
2412 }
2413
2414 /*
2415 * Get a request and submit it to the controller.
2416 */
2417 static void do_cciss_request(request_queue_t *q)
2418 {
2419 ctlr_info_t *h = q->queuedata;
2420 CommandList_struct *c;
2421 int start_blk, seg;
2422 struct request *creq;
2423 u64bit temp64;
2424 struct scatterlist tmp_sg[MAXSGENTRIES];
2425 drive_info_struct *drv;
2426 int i, dir;
2427
2428 /* We call start_io here in case there is a command waiting on the
2429 * queue that has not been sent.
2430 */
2431 if (blk_queue_plugged(q))
2432 goto startio;
2433
2434 queue:
2435 creq = elv_next_request(q);
2436 if (!creq)
2437 goto startio;
2438
2439 BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);
2440
2441 if ((c = cmd_alloc(h, 1)) == NULL)
2442 goto full;
2443
2444 blkdev_dequeue_request(creq);
2445
2446 spin_unlock_irq(q->queue_lock);
2447
2448 c->cmd_type = CMD_RWREQ;
2449 c->rq = creq;
2450
2451 /* fill in the request */
2452 drv = creq->rq_disk->private_data;
2453 c->Header.ReplyQueue = 0; // unused in simple mode
2454 /* got command from pool, so use the command block index instead */
2455 /* for direct lookups. */
2456 /* The first 2 bits are reserved for controller error reporting. */
2457 c->Header.Tag.lower = (c->cmdindex << 3);
2458 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
2459 c->Header.LUN.LogDev.VolId = drv->LunID;
2460 c->Header.LUN.LogDev.Mode = 1;
2461 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
2462 c->Request.Type.Type = TYPE_CMD; // It is a command.
2463 c->Request.Type.Attribute = ATTR_SIMPLE;
2464 c->Request.Type.Direction =
2465 (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
2466 c->Request.Timeout = 0; // Don't time out
2467 c->Request.CDB[0] =
2468 (rq_data_dir(creq) == READ) ? CCISS_READ : CCISS_WRITE;
2469 start_blk = creq->sector;
2470 #ifdef CCISS_DEBUG
2471 printk(KERN_DEBUG "cciss: sector=%d nr_sectors=%d\n", (int)creq->sector,
2472 (int)creq->nr_sectors);
2473 #endif /* CCISS_DEBUG */
2474
2475 seg = blk_rq_map_sg(q, creq, tmp_sg);
2476
2477 /* get the DMA records for the setup */
2478 if (c->Request.Type.Direction == XFER_READ)
2479 dir = PCI_DMA_FROMDEVICE;
2480 else
2481 dir = PCI_DMA_TODEVICE;
2482
2483 for (i = 0; i < seg; i++) {
2484 c->SG[i].Len = tmp_sg[i].length;
2485 temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page,
2486 tmp_sg[i].offset,
2487 tmp_sg[i].length, dir);
2488 c->SG[i].Addr.lower = temp64.val32.lower;
2489 c->SG[i].Addr.upper = temp64.val32.upper;
2490 c->SG[i].Ext = 0; // we are not chaining
2491 }
2492 /* track how many SG entries we are using */
2493 if (seg > h->maxSG)
2494 h->maxSG = seg;
2495
2496 #ifdef CCISS_DEBUG
2497 printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n",
2498 creq->nr_sectors, seg);
2499 #endif /* CCISS_DEBUG */
2500
2501 c->Header.SGList = c->Header.SGTotal = seg;
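	/* Build the rest of the 10-byte READ/WRITE CDB: bytes 2-5 hold the
	 * starting LBA (big-endian), bytes 7-8 the sector count. */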
2502 c->Request.CDB[1] = 0;
2503 c->Request.CDB[2] = (start_blk >> 24) & 0xff; //MSB
2504 c->Request.CDB[3] = (start_blk >> 16) & 0xff;
2505 c->Request.CDB[4] = (start_blk >> 8) & 0xff;
2506 c->Request.CDB[5] = start_blk & 0xff;
2507 c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
2508 c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
2509 c->Request.CDB[8] = creq->nr_sectors & 0xff;
2510 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
2511
2512 spin_lock_irq(q->queue_lock);
2513
2514 addQ(&(h->reqQ), c);
2515 h->Qdepth++;
2516 if (h->Qdepth > h->maxQsinceinit)
2517 h->maxQsinceinit = h->Qdepth;
2518
2519 goto queue;
2520 full:
2521 blk_stop_queue(q);
2522 startio:
2523 /* We will already have the driver lock here, so we do not need
2524 * to lock it.
2525 */
2526 start_io(h);
2527 }
2528
2529 static inline unsigned long get_next_completion(ctlr_info_t *h)
2530 {
2531 #ifdef CONFIG_CISS_SCSI_TAPE
2532 /* Any rejects from sendcmd() lying around? Process them first */
2533 if (h->scsi_rejects.ncompletions == 0)
2534 return h->access.command_completed(h);
2535 else {
2536 struct sendcmd_reject_list *srl;
2537 int n;
2538 srl = &h->scsi_rejects;
2539 n = --srl->ncompletions;
2540 /* printk("cciss%d: processing saved reject\n", h->ctlr); */
2541 printk("p");
2542 return srl->complete[n];
2543 }
2544 #else
2545 return h->access.command_completed(h);
2546 #endif
2547 }
2548
2549 static inline int interrupt_pending(ctlr_info_t *h)
2550 {
2551 #ifdef CONFIG_CISS_SCSI_TAPE
2552 return (h->access.intr_pending(h)
2553 || (h->scsi_rejects.ncompletions > 0));
2554 #else
2555 return h->access.intr_pending(h);
2556 #endif
2557 }
2558
2559 static inline long interrupt_not_for_us(ctlr_info_t *h)
2560 {
2561 #ifdef CONFIG_CISS_SCSI_TAPE
2562 return (((h->access.intr_pending(h) == 0) ||
2563 (h->interrupts_enabled == 0))
2564 && (h->scsi_rejects.ncompletions == 0));
2565 #else
2566 return (((h->access.intr_pending(h) == 0) ||
2567 (h->interrupts_enabled == 0)));
2568 #endif
2569 }
2570
2571 static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
2572 {
2573 ctlr_info_t *h = dev_id;
2574 CommandList_struct *c;
2575 unsigned long flags;
2576 __u32 a, a1, a2;
2577
2578 if (interrupt_not_for_us(h))
2579 return IRQ_NONE;
2580 /*
2581 * If there are completed commands in the completion queue,
2582 * we had better do something about it.
2583 */
2584 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2585 while (interrupt_pending(h)) {
2586 while ((a = get_next_completion(h)) != FIFO_EMPTY) {
2587 a1 = a;
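			/* Bit 2 of the tag marks a direct-lookup completion:
			 * bits 31:3 hold the command's index into cmd_pool
			 * (see the tag setup in do_cciss_request). */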
2588 if ((a & 0x04)) {
2589 a2 = (a >> 3);
2590 if (a2 >= NR_CMDS) {
2591 printk(KERN_WARNING
2592 "cciss: controller cciss%d failed, stopping.\n",
2593 h->ctlr);
2594 fail_all_cmds(h->ctlr);
2595 return IRQ_HANDLED;
2596 }
2597
2598 c = h->cmd_pool + a2;
2599 a = c->busaddr;
2600
2601 } else {
2602 a &= ~3;
2603 if ((c = h->cmpQ) == NULL) {
2604 printk(KERN_WARNING
2605 "cciss: Completion of %08x ignored\n",
2606 a1);
2607 continue;
2608 }
2609 while (c->busaddr != a) {
2610 c = c->next;
2611 if (c == h->cmpQ)
2612 break;
2613 }
2614 }
2615 /*
2616 * If we've found the command, take it off the
2617 * completion Q and free it
2618 */
2619 if (c->busaddr == a) {
2620 removeQ(&h->cmpQ, c);
2621 if (c->cmd_type == CMD_RWREQ) {
2622 complete_command(h, c, 0);
2623 } else if (c->cmd_type == CMD_IOCTL_PEND) {
2624 complete(c->waiting);
2625 }
2626 # ifdef CONFIG_CISS_SCSI_TAPE
2627 else if (c->cmd_type == CMD_SCSI)
2628 complete_scsi_command(c, 0, a1);
2629 # endif
2630 continue;
2631 }
2632 }
2633 }
2634
2635 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2636 return IRQ_HANDLED;
2637 }
2638
2639 /*
2640 * We cannot read the structure directly, for portability we must use
2641 * the io functions.
2642 * This is for debug only.
2643 */
2644 #ifdef CCISS_DEBUG
2645 static void print_cfg_table(CfgTable_struct *tb)
2646 {
2647 int i;
2648 char temp_name[17];
2649
2650 printk("Controller Configuration information\n");
2651 printk("------------------------------------\n");
2652 for (i = 0; i < 4; i++)
2653 temp_name[i] = readb(&(tb->Signature[i]));
2654 temp_name[4] = '\0';
2655 printk(" Signature = %s\n", temp_name);
2656 printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
2657 printk(" Transport methods supported = 0x%x\n",
2658 readl(&(tb->TransportSupport)));
2659 printk(" Transport methods active = 0x%x\n",
2660 readl(&(tb->TransportActive)));
2661 printk(" Requested transport Method = 0x%x\n",
2662 readl(&(tb->HostWrite.TransportRequest)));
2663 printk(" Coalesce Interrupt Delay = 0x%x\n",
2664 readl(&(tb->HostWrite.CoalIntDelay)));
2665 printk(" Coalesce Interrupt Count = 0x%x\n",
2666 readl(&(tb->HostWrite.CoalIntCount)));
2667 printk(" Max outstanding commands = %d\n",
2668 readl(&(tb->CmdsOutMax)));
2669 printk(" Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
2670 for (i = 0; i < 16; i++)
2671 temp_name[i] = readb(&(tb->ServerName[i]));
2672 temp_name[16] = '\0';
2673 printk(" Server Name = %s\n", temp_name);
2674 printk(" Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat)));
2675 }
2676 #endif /* CCISS_DEBUG */
2677
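/*
 * Translate a PCI config-space base address register offset
 * (PCI_BASE_ADDRESS_0 + n) into the matching PCI resource index,
 * accounting for 64-bit memory BARs that occupy two dwords.
 * Returns -1 if no resource matches.
 */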
2678 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
2679 {
2680 int i, offset, mem_type, bar_type;
2681 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
2682 return 0;
2683 offset = 0;
2684 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2685 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
2686 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
2687 offset += 4;
2688 else {
2689 mem_type = pci_resource_flags(pdev, i) &
2690 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
2691 switch (mem_type) {
2692 case PCI_BASE_ADDRESS_MEM_TYPE_32:
2693 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
2694 offset += 4; /* 32 bit */
2695 break;
2696 case PCI_BASE_ADDRESS_MEM_TYPE_64:
2697 offset += 8;
2698 break;
2699 default: /* reserved in PCI 2.2 */
2700 printk(KERN_WARNING
2701 "Base address is invalid\n");
2702 return -1;
2703 break;
2704 }
2705 }
2706 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
2707 return i + 1;
2708 }
2709 return -1;
2710 }
2711
2712 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
2713 * controllers that are capable. If not, we use IO-APIC mode.
2714 */
2715
2716 static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
2717 struct pci_dev *pdev, __u32 board_id)
2718 {
2719 #ifdef CONFIG_PCI_MSI
2720 int err;
2721 struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1},
2722 {0, 2}, {0, 3}
2723 };
2724
2725 /* Some boards advertise MSI but don't really support it */
2726 if ((board_id == 0x40700E11) ||
2727 (board_id == 0x40800E11) ||
2728 (board_id == 0x40820E11) || (board_id == 0x40830E11))
2729 goto default_int_mode;
2730
2731 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
2732 err = pci_enable_msix(pdev, cciss_msix_entries, 4);
2733 if (!err) {
2734 c->intr[0] = cciss_msix_entries[0].vector;
2735 c->intr[1] = cciss_msix_entries[1].vector;
2736 c->intr[2] = cciss_msix_entries[2].vector;
2737 c->intr[3] = cciss_msix_entries[3].vector;
2738 c->msix_vector = 1;
2739 return;
2740 }
2741 if (err > 0) {
2742 printk(KERN_WARNING "cciss: only %d MSI-X vectors "
2743 "available\n", err);
2744 } else {
2745 printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
2746 err);
2747 }
2748 }
2749 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
2750 if (!pci_enable_msi(pdev)) {
2751 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2752 c->msi_vector = 1;
2753 return;
2754 } else {
2755 printk(KERN_WARNING "cciss: MSI init failed\n");
2756 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2757 return;
2758 }
2759 }
2760 default_int_mode:
2761 #endif /* CONFIG_PCI_MSI */
2762 /* if we get here we're going to use the default interrupt mode */
2763 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2764 return;
2765 }
2766
2767 static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2768 {
2769 ushort subsystem_vendor_id, subsystem_device_id, command;
2770 __u32 board_id, scratchpad = 0;
2771 __u64 cfg_offset;
2772 __u32 cfg_base_addr;
2773 __u64 cfg_base_addr_index;
2774 int i, err;
2775
2776 /* check to see if controller has been disabled */
2777 /* BEFORE trying to enable it */
2778 (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
2779 if (!(command & 0x02)) {
2780 printk(KERN_WARNING
2781 "cciss: controller appears to be disabled\n");
2782 return -ENODEV;
2783 }
2784
2785 err = pci_enable_device(pdev);
2786 if (err) {
2787 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
2788 return err;
2789 }
2790
2791 err = pci_request_regions(pdev, "cciss");
2792 if (err) {
2793 printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
2794 "aborting\n");
2795 goto err_out_disable_pdev;
2796 }
2797
2798 subsystem_vendor_id = pdev->subsystem_vendor;
2799 subsystem_device_id = pdev->subsystem_device;
2800 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
2801 subsystem_vendor_id);
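	/* board_id packs the PCI subsystem IDs: device ID in the upper 16
	 * bits, vendor ID in the lower 16; it is matched against the
	 * products[] table below. */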
2802
2803 #ifdef CCISS_DEBUG
2804 printk("command = %x\n", command);
2805 printk("irq = %x\n", pdev->irq);
2806 printk("board_id = %x\n", board_id);
2807 #endif /* CCISS_DEBUG */
2808
2809 /* If the kernel supports MSI/MSI-X we will try to enable that functionality,
2810 * else we use the IO-APIC interrupt assigned to us by system ROM.
2811 */
2812 cciss_interrupt_mode(c, pdev, board_id);
2813
2814 /*
2815 * The memory base address is the first BAR; the second points to the
2816 * config table.
2817 */
2818
2819 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
2820 #ifdef CCISS_DEBUG
2821 printk("address 0 = %x\n", c->paddr);
2822 #endif /* CCISS_DEBUG */
2823 c->vaddr = remap_pci_mem(c->paddr, 200);
2824
2825 /* Wait for the board to become ready. (PCI hotplug needs this.)
2826 * We poll for up to 120 secs, once per 100ms. */
2827 for (i = 0; i < 1200; i++) {
2828 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
2829 if (scratchpad == CCISS_FIRMWARE_READY)
2830 break;
2831 set_current_state(TASK_INTERRUPTIBLE);
2832 schedule_timeout(HZ / 10); /* wait 100ms */
2833 }
2834 if (scratchpad != CCISS_FIRMWARE_READY) {
2835 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
2836 err = -ENODEV;
2837 goto err_out_free_res;
2838 }
2839
2840 /* get the address index number */
2841 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
2842 cfg_base_addr &= (__u32) 0x0000ffff;
2843 #ifdef CCISS_DEBUG
2844 printk("cfg base address = %x\n", cfg_base_addr);
2845 #endif /* CCISS_DEBUG */
2846 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
2847 #ifdef CCISS_DEBUG
2848 printk("cfg base address index = %x\n", cfg_base_addr_index);
2849 #endif /* CCISS_DEBUG */
2850 if (cfg_base_addr_index == -1) {
2851 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
2852 err = -ENODEV;
2853 goto err_out_free_res;
2854 }
2855
2856 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
2857 #ifdef CCISS_DEBUG
2858 printk("cfg offset = %x\n", cfg_offset);
2859 #endif /* CCISS_DEBUG */
2860 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
2861 cfg_base_addr_index) +
2862 cfg_offset, sizeof(CfgTable_struct));
2863 c->board_id = board_id;
2864
2865 #ifdef CCISS_DEBUG
2866 print_cfg_table(c->cfgtable);
2867 #endif /* CCISS_DEBUG */
2868
2869 for (i = 0; i < ARRAY_SIZE(products); i++) {
2870 if (board_id == products[i].board_id) {
2871 c->product_name = products[i].product_name;
2872 c->access = *(products[i].access);
2873 break;
2874 }
2875 }
2876 if (i == ARRAY_SIZE(products)) {
2877 printk(KERN_WARNING "cciss: Sorry, I don't know how"
2878 " to access the Smart Array controller %08lx\n",
2879 (unsigned long)board_id);
2880 err = -ENODEV;
2881 goto err_out_free_res;
2882 }
2883 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
2884 (readb(&c->cfgtable->Signature[1]) != 'I') ||
2885 (readb(&c->cfgtable->Signature[2]) != 'S') ||
2886 (readb(&c->cfgtable->Signature[3]) != 'S')) {
2887 printk("Does not appear to be a valid CISS config table\n");
2888 err = -ENODEV;
2889 goto err_out_free_res;
2890 }
2891 #ifdef CONFIG_X86
2892 {
2893 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
2894 __u32 prefetch;
2895 prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
2896 prefetch |= 0x100;
2897 writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
2898 }
2899 #endif
2900
2901 #ifdef CCISS_DEBUG
2902 printk("Trying to put board into Simple mode\n");
2903 #endif /* CCISS_DEBUG */
2904 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
2905 /* Update the field, and then ring the doorbell */
2906 writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest));
2907 writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
2908
2909 /* under certain very rare conditions, this can take a while.
2910 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
2911 * as we enter this code.) */
2912 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
2913 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
2914 break;
2915 /* delay and try again */
2916 set_current_state(TASK_INTERRUPTIBLE);
2917 schedule_timeout(10);
2918 }
2919
2920 #ifdef CCISS_DEBUG
2921 printk(KERN_DEBUG "I counter got to %d %x\n", i,
2922 readl(c->vaddr + SA5_DOORBELL));
2923 #endif /* CCISS_DEBUG */
2924 #ifdef CCISS_DEBUG
2925 print_cfg_table(c->cfgtable);
2926 #endif /* CCISS_DEBUG */
2927
2928 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
2929 printk(KERN_WARNING "cciss: unable to get board into"
2930 " simple mode\n");
2931 err = -ENODEV;
2932 goto err_out_free_res;
2933 }
2934 return 0;
2935
2936 err_out_free_res:
2937 pci_release_regions(pdev);
2938
2939 err_out_disable_pdev:
2940 pci_disable_device(pdev);
2941 return err;
2942 }
2943
2944 /*
2945 * Gets information about the local volumes attached to the controller.
2946 */
2947 static void cciss_getgeometry(int cntl_num)
2948 {
2949 ReportLunData_struct *ld_buff;
2950 ReadCapdata_struct *size_buff;
2951 InquiryData_struct *inq_buff;
2952 int return_code;
2953 int i;
2954 int listlength = 0;
2955 __u32 lunid = 0;
2956 int block_size;
2957 int total_size;
2958
2959 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
2960 if (ld_buff == NULL) {
2961 printk(KERN_ERR "cciss: out of memory\n");
2962 return;
2963 }
2964 size_buff = kmalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
2965 if (size_buff == NULL) {
2966 printk(KERN_ERR "cciss: out of memory\n");
2967 kfree(ld_buff);
2968 return;
2969 }
2970 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
2971 if (inq_buff == NULL) {
2972 printk(KERN_ERR "cciss: out of memory\n");
2973 kfree(ld_buff);
2974 kfree(size_buff);
2975 return;
2976 }
2977 /* Get the firmware version */
2978 return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
2979 sizeof(InquiryData_struct), 0, 0, 0, NULL,
2980 TYPE_CMD);
2981 if (return_code == IO_OK) {
2982 hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
2983 hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
2984 hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
2985 hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
2986 } else { /* send command failed */
2987
2988 printk(KERN_WARNING "cciss: unable to determine firmware"
2989 " version of controller\n");
2990 }
2991 /* Get the number of logical volumes */
2992 return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
2993 sizeof(ReportLunData_struct), 0, 0, 0, NULL,
2994 TYPE_CMD);
2995
2996 if (return_code == IO_OK) {
2997 #ifdef CCISS_DEBUG
2998 printk("LUN Data\n--------------------------\n");
2999 #endif /* CCISS_DEBUG */
3000
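		/* LUNListLength is a big-endian byte array; assemble it into
		 * a host-order byte count (8 bytes per LUN entry). */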
3001 listlength |=
3002 (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
3003 listlength |=
3004 (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
3005 listlength |=
3006 (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
3007 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
3008 } else { /* reading number of logical volumes failed */
3009
3010 printk(KERN_WARNING "cciss: report logical volume"
3011 " command failed\n");
3012 listlength = 0;
3013 }
3014 hba[cntl_num]->num_luns = listlength / 8; // 8 bytes per entry
3015 if (hba[cntl_num]->num_luns > CISS_MAX_LUN) {
3016 printk(KERN_ERR
3017 "cciss: only %d logical volumes supported\n",
3018 CISS_MAX_LUN);
3019 hba[cntl_num]->num_luns = CISS_MAX_LUN;
3020 }
3021 #ifdef CCISS_DEBUG
3022 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n",
3023 ld_buff->LUNListLength[0], ld_buff->LUNListLength[1],
3024 ld_buff->LUNListLength[2], ld_buff->LUNListLength[3],
3025 hba[cntl_num]->num_luns);
3026 #endif /* CCISS_DEBUG */
3027
3028 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns - 1;
3029 // for(i=0; i< hba[cntl_num]->num_luns; i++)
3030 for (i = 0; i < CISS_MAX_LUN; i++) {
3031 if (i < hba[cntl_num]->num_luns) {
3032 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3]))
3033 << 24;
3034 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2]))
3035 << 16;
3036 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1]))
3037 << 8;
3038 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
3039
3040 hba[cntl_num]->drv[i].LunID = lunid;
3041
3042 #ifdef CCISS_DEBUG
3043 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
3044 ld_buff->LUN[i][0], ld_buff->LUN[i][1],
3045 ld_buff->LUN[i][2], ld_buff->LUN[i][3],
3046 hba[cntl_num]->drv[i].LunID);
3047 #endif /* CCISS_DEBUG */
3048 cciss_read_capacity(cntl_num, i, size_buff, 0,
3049 &total_size, &block_size);
3050 cciss_geometry_inquiry(cntl_num, i, 0, total_size,
3051 block_size, inq_buff,
3052 &hba[cntl_num]->drv[i]);
3053 } else {
3054 /* initialize raid_level to indicate a free space */
3055 hba[cntl_num]->drv[i].raid_level = -1;
3056 }
3057 }
3058 kfree(ld_buff);
3059 kfree(size_buff);
3060 kfree(inq_buff);
3061 }
3062
3063 /* Function to find the first free pointer into our hba[] array */
3064 /* Returns -1 if no free entries are left. */
3065 static int alloc_cciss_hba(void)
3066 {
3067 struct gendisk *disk[NWD];
3068 int i, n;
3069 for (n = 0; n < NWD; n++) {
3070 disk[n] = alloc_disk(1 << NWD_SHIFT);
3071 if (!disk[n])
3072 goto out;
3073 }
3074
3075 for (i = 0; i < MAX_CTLR; i++) {
3076 if (!hba[i]) {
3077 ctlr_info_t *p;
3078 p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
3079 if (!p)
3080 goto Enomem;
3081 for (n = 0; n < NWD; n++)
3082 p->gendisk[n] = disk[n];
3083 hba[i] = p;
3084 return i;
3085 }
3086 }
3087 printk(KERN_WARNING "cciss: This driver supports a maximum"
3088 " of %d controllers.\n", MAX_CTLR);
3089 goto out;
3090 Enomem:
3091 printk(KERN_ERR "cciss: out of memory.\n");
3092 out:
3093 while (n--)
3094 put_disk(disk[n]);
3095 return -1;
3096 }
3097
3098 static void free_hba(int i)
3099 {
3100 ctlr_info_t *p = hba[i];
3101 int n;
3102
3103 hba[i] = NULL;
3104 for (n = 0; n < NWD; n++)
3105 put_disk(p->gendisk[n]);
3106 kfree(p);
3107 }
3108
3109 /*
3110 * This is it. Find all the controllers and register them. I really hate
3111 * stealing all these major device numbers.
3112 * Returns 1 on success, -1 on failure.
3113 */
3114 static int __devinit cciss_init_one(struct pci_dev *pdev,
3115 const struct pci_device_id *ent)
3116 {
3117 request_queue_t *q;
3118 int i;
3119 int j;
3120 int rc;
3121 int dac;
3122
3123 i = alloc_cciss_hba();
3124 if (i < 0)
3125 return -1;
3126
3127 hba[i]->busy_initializing = 1;
3128
3129 if (cciss_pci_init(hba[i], pdev) != 0)
3130 goto clean1;
3131
3132 sprintf(hba[i]->devname, "cciss%d", i);
3133 hba[i]->ctlr = i;
3134 hba[i]->pdev = pdev;
3135
3136 /* configure PCI DMA stuff */
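	/* Prefer a 64-bit DMA mask (DAC addressing); fall back to 32-bit. */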
3137 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
3138 dac = 1;
3139 else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
3140 dac = 0;
3141 else {
3142 printk(KERN_ERR "cciss: no suitable DMA available\n");
3143 goto clean1;
3144 }
3145
3146 /*
3147 * register with the major number, or get a dynamic major number
3148 * by passing 0 as argument. This is done for greater than
3149 * 8 controller support.
3150 */
3151 if (i < MAX_CTLR_ORIG)
3152 hba[i]->major = COMPAQ_CISS_MAJOR + i;
3153 rc = register_blkdev(hba[i]->major, hba[i]->devname);
3154 if (rc == -EBUSY || rc == -EINVAL) {
3155 printk(KERN_ERR
3156 "cciss: Unable to get major number %d for %s "
3157 "on hba %d\n", hba[i]->major, hba[i]->devname, i);
3158 goto clean1;
3159 } else {
3160 if (i >= MAX_CTLR_ORIG)
3161 hba[i]->major = rc;
3162 }
3163
3164 /* make sure the board interrupts are off */
3165 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
3166 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
3167 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
3168 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
3169 hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
3170 goto clean2;
3171 }
3172
3173 printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
3174 hba[i]->devname, pdev->device, pci_name(pdev),
3175 hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
3176
3177 hba[i]->cmd_pool_bits =
3178 kmalloc(((NR_CMDS + BITS_PER_LONG -
3179 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
3180 hba[i]->cmd_pool = (CommandList_struct *)
3181 pci_alloc_consistent(hba[i]->pdev,
3182 NR_CMDS * sizeof(CommandList_struct),
3183 &(hba[i]->cmd_pool_dhandle));
3184 hba[i]->errinfo_pool = (ErrorInfo_struct *)
3185 pci_alloc_consistent(hba[i]->pdev,
3186 NR_CMDS * sizeof(ErrorInfo_struct),
3187 &(hba[i]->errinfo_pool_dhandle));
3188 if ((hba[i]->cmd_pool_bits == NULL)
3189 || (hba[i]->cmd_pool == NULL)
3190 || (hba[i]->errinfo_pool == NULL)) {
3191 printk(KERN_ERR "cciss: out of memory");
3192 goto clean4;
3193 }
3194 #ifdef CONFIG_CISS_SCSI_TAPE
3195 hba[i]->scsi_rejects.complete =
3196 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
3197 (NR_CMDS + 5), GFP_KERNEL);
3198 if (hba[i]->scsi_rejects.complete == NULL) {
3199 printk(KERN_ERR "cciss: out of memory");
3200 goto clean4;
3201 }
3202 #endif
3203 spin_lock_init(&hba[i]->lock);
3204
3205 /* Initialize the pdev driver private data.
3206 have it point to hba[i]. */
3207 pci_set_drvdata(pdev, hba[i]);
3208 /* command and error info recs zeroed out before
3209 they are used */
3210 memset(hba[i]->cmd_pool_bits, 0,
3211 ((NR_CMDS + BITS_PER_LONG -
3212 1) / BITS_PER_LONG) * sizeof(unsigned long));
3213
3214 #ifdef CCISS_DEBUG
3215 printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n", i);
3216 #endif /* CCISS_DEBUG */
3217
3218 cciss_getgeometry(i);
3219
3220 cciss_scsi_setup(i);
3221
3222 /* Turn the interrupts on so we can service requests */
3223 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
3224
3225 cciss_procinit(i);
3226 hba[i]->busy_initializing = 0;
3227
3228 for (j = 0; j < NWD; j++) { /* mfm */
3229 drive_info_struct *drv = &(hba[i]->drv[j]);
3230 struct gendisk *disk = hba[i]->gendisk[j];
3231
3232 q = blk_init_queue(do_cciss_request, &hba[i]->lock);
3233 if (!q) {
3234 printk(KERN_ERR
3235 "cciss: unable to allocate queue for disk %d\n",
3236 j);
3237 break;
3238 }
3239 drv->queue = q;
3240
3241 q->backing_dev_info.ra_pages = READ_AHEAD;
3242 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
3243
3244 /* This is a hardware imposed limit. */
3245 blk_queue_max_hw_segments(q, MAXSGENTRIES);
3246
3247 /* This is a limit in the driver and could be eliminated. */
3248 blk_queue_max_phys_segments(q, MAXSGENTRIES);
3249
3250 blk_queue_max_sectors(q, 512);
3251
3252 blk_queue_softirq_done(q, cciss_softirq_done);
3253
3254 q->queuedata = hba[i];
3255 sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
3256 disk->major = hba[i]->major;
3257 disk->first_minor = j << NWD_SHIFT;
3258 disk->fops = &cciss_fops;
3259 disk->queue = q;
3260 disk->private_data = drv;
3261 disk->driverfs_dev = &pdev->dev;
3262 /* we must register the controller even if no disks exist */
3263 /* this is for the online array utilities */
3264 if (!drv->heads && j)
3265 continue;
3266 blk_queue_hardsect_size(q, drv->block_size);
3267 set_capacity(disk, drv->nr_blocks);
3268 add_disk(disk);
3269 }
3270
3271 return 1;
3272
3273 clean4:
3274 #ifdef CONFIG_CISS_SCSI_TAPE
3275 kfree(hba[i]->scsi_rejects.complete);
3276 #endif
3277 kfree(hba[i]->cmd_pool_bits);
3278 if (hba[i]->cmd_pool)
3279 pci_free_consistent(hba[i]->pdev,
3280 NR_CMDS * sizeof(CommandList_struct),
3281 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3282 if (hba[i]->errinfo_pool)
3283 pci_free_consistent(hba[i]->pdev,
3284 NR_CMDS * sizeof(ErrorInfo_struct),
3285 hba[i]->errinfo_pool,
3286 hba[i]->errinfo_pool_dhandle);
3287 free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
3288 clean2:
3289 unregister_blkdev(hba[i]->major, hba[i]->devname);
3290 clean1:
3291 hba[i]->busy_initializing = 0;
3292 free_hba(i);
3293 return -1;
3294 }
3295
3296 static void __devexit cciss_remove_one(struct pci_dev *pdev)
3297 {
3298 ctlr_info_t *tmp_ptr;
3299 int i, j;
3300 char flush_buf[4];
3301 int return_code;
3302
3303 if (pci_get_drvdata(pdev) == NULL) {
3304 printk(KERN_ERR "cciss: Unable to remove device\n");
3305 return;
3306 }
3307 tmp_ptr = pci_get_drvdata(pdev);
3308 i = tmp_ptr->ctlr;
3309 if (hba[i] == NULL) {
3310 printk(KERN_ERR "cciss: device appears to "
3311 "already be removed\n");
3312 return;
3313 }
3314 /* Turn board interrupts off and send the flush cache command */
3315 /* sendcmd will turn off interrupts and send the flush cache command
3316 * to write all data in the battery-backed cache out to disk */
3317 memset(flush_buf, 0, 4);
3318 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
3319 TYPE_CMD);
3320 if (return_code != IO_OK) {
3321 printk(KERN_WARNING "Error Flushing cache on controller %d\n",
3322 i);
3323 }
3324 free_irq(hba[i]->intr[2], hba[i]);
3325
3326 #ifdef CONFIG_PCI_MSI
3327 if (hba[i]->msix_vector)
3328 pci_disable_msix(hba[i]->pdev);
3329 else if (hba[i]->msi_vector)
3330 pci_disable_msi(hba[i]->pdev);
3331 #endif /* CONFIG_PCI_MSI */
3332
3333 iounmap(hba[i]->vaddr);
3334 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3335 unregister_blkdev(hba[i]->major, hba[i]->devname);
3336 remove_proc_entry(hba[i]->devname, proc_cciss);
3337
3338 /* remove it from the disk list */
3339 for (j = 0; j < NWD; j++) {
3340 struct gendisk *disk = hba[i]->gendisk[j];
3341 if (disk) {
3342 request_queue_t *q = disk->queue;
3343
3344 if (disk->flags & GENHD_FL_UP)
3345 del_gendisk(disk);
3346 if (q)
3347 blk_cleanup_queue(q);
3348 }
3349 }
3350
3351 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
3352 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3353 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(ErrorInfo_struct),
3354 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3355 kfree(hba[i]->cmd_pool_bits);
3356 #ifdef CONFIG_CISS_SCSI_TAPE
3357 kfree(hba[i]->scsi_rejects.complete);
3358 #endif
3359 pci_release_regions(pdev);
3360 pci_disable_device(pdev);
3361 pci_set_drvdata(pdev, NULL);
3362 free_hba(i);
3363 }
3364
3365 static struct pci_driver cciss_pci_driver = {
3366 .name = "cciss",
3367 .probe = cciss_init_one,
3368 .remove = __devexit_p(cciss_remove_one),
3369 .id_table = cciss_pci_device_id, /* id_table */
3370 };
3371
3372 /*
3373 * This is it. Register the PCI driver information for the cards we control;
3374 * the OS will call our registered routines when it finds one of our cards.
3375 */
3376 static int __init cciss_init(void)
3377 {
3378 printk(KERN_INFO DRIVER_NAME "\n");
3379
3380 /* Register for our PCI devices */
3381 return pci_register_driver(&cciss_pci_driver);
3382 }
3383
3384 static void __exit cciss_cleanup(void)
3385 {
3386 int i;
3387
3388 pci_unregister_driver(&cciss_pci_driver);
3389 /* double check that all controller entries have been removed */
3390 for (i = 0; i < MAX_CTLR; i++) {
3391 if (hba[i] != NULL) {
3392 printk(KERN_WARNING "cciss: had to remove"
3393 " controller %d\n", i);
3394 cciss_remove_one(hba[i]->pdev);
3395 }
3396 }
3397 remove_proc_entry("cciss", proc_root_driver);
3398 }
3399
3400 static void fail_all_cmds(unsigned long ctlr)
3401 {
3402 /* If we get here, the board is apparently dead. */
3403 ctlr_info_t *h = hba[ctlr];
3404 CommandList_struct *c;
3405 unsigned long flags;
3406
3407 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
3408 h->alive = 0; /* the controller apparently died... */
3409
3410 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
3411
3412 pci_disable_device(h->pdev); /* Make sure it is really dead. */
3413
3414 /* move everything off the request queue onto the completed queue */
3415 while ((c = h->reqQ) != NULL) {
3416 removeQ(&(h->reqQ), c);
3417 h->Qdepth--;
3418 addQ(&(h->cmpQ), c);
3419 }
3420
3421 /* Now, fail everything on the completed queue with a HW error */
3422 while ((c = h->cmpQ) != NULL) {
3423 removeQ(&h->cmpQ, c);
3424 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
3425 if (c->cmd_type == CMD_RWREQ) {
3426 complete_command(h, c, 0);
3427 } else if (c->cmd_type == CMD_IOCTL_PEND)
3428 complete(c->waiting);
3429 #ifdef CONFIG_CISS_SCSI_TAPE
3430 else if (c->cmd_type == CMD_SCSI)
3431 complete_scsi_command(c, 0, 0);
3432 #endif
3433 }
3434 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
3435 return;
3436 }
3437
3438 module_init(cciss_init);
3439 module_exit(cciss_cleanup);