Drop 'size' argument from bio_endio and bi_end_io
drivers/block/cciss.c
1 /*
2 * Disk Array driver for HP SA 5xxx and 6xxx Controllers
3 * Copyright 2000, 2006 Hewlett-Packard Development Company, L.P.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
20 *
21 */
22
23 #include <linux/module.h>
24 #include <linux/interrupt.h>
25 #include <linux/types.h>
26 #include <linux/pci.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/delay.h>
30 #include <linux/major.h>
31 #include <linux/fs.h>
32 #include <linux/bio.h>
33 #include <linux/blkpg.h>
34 #include <linux/timer.h>
35 #include <linux/proc_fs.h>
36 #include <linux/init.h>
37 #include <linux/hdreg.h>
38 #include <linux/spinlock.h>
39 #include <linux/compat.h>
40 #include <linux/blktrace_api.h>
41 #include <asm/uaccess.h>
42 #include <asm/io.h>
43
44 #include <linux/dma-mapping.h>
45 #include <linux/blkdev.h>
46 #include <linux/genhd.h>
47 #include <linux/completion.h>
48 #include <scsi/scsi.h>
49 #include <scsi/sg.h>
50 #include <scsi/scsi_ioctl.h>
51 #include <linux/cdrom.h>
52
53 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
54 #define DRIVER_NAME "HP CISS Driver (v 3.6.14)"
55 #define DRIVER_VERSION CCISS_DRIVER_VERSION(3,6,14)
56
57 /* Embedded module documentation macros - see modules.h */
58 MODULE_AUTHOR("Hewlett-Packard Company");
59 MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 3.6.14");
60 MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
61 " SA6i P600 P800 P400 P400i E200 E200i E500");
62 MODULE_VERSION("3.6.14");
63 MODULE_LICENSE("GPL");
64
65 #include "cciss_cmd.h"
66 #include "cciss.h"
67 #include <linux/cciss_ioctl.h>
68
69 /* define the PCI info for the cards we can control */
70 static const struct pci_device_id cciss_pci_device_id[] = {
71 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070},
72 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
73 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
74 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083},
75 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091},
76 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A},
77 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B},
78 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C},
79 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D},
80 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225},
81 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
82 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
83 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235},
84 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211},
85 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212},
86 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213},
87 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214},
88 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
89 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3237},
90 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D},
91 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
92 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
93 {0,}
94 };
95
96 MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
97
98 /* board_id = Subsystem Device ID & Vendor ID
99 * product = Marketing Name for the board
100 * access = Address of the struct of function pointers
101 * nr_cmds = Number of commands supported by controller
102 */
103 static struct board_type products[] = {
104 {0x40700E11, "Smart Array 5300", &SA5_access, 512},
105 {0x40800E11, "Smart Array 5i", &SA5B_access, 512},
106 {0x40820E11, "Smart Array 532", &SA5B_access, 512},
107 {0x40830E11, "Smart Array 5312", &SA5B_access, 512},
108 {0x409A0E11, "Smart Array 641", &SA5_access, 512},
109 {0x409B0E11, "Smart Array 642", &SA5_access, 512},
110 {0x409C0E11, "Smart Array 6400", &SA5_access, 512},
111 {0x409D0E11, "Smart Array 6400 EM", &SA5_access, 512},
112 {0x40910E11, "Smart Array 6i", &SA5_access, 512},
113 {0x3225103C, "Smart Array P600", &SA5_access, 512},
114 {0x3223103C, "Smart Array P800", &SA5_access, 512},
115 {0x3234103C, "Smart Array P400", &SA5_access, 512},
116 {0x3235103C, "Smart Array P400i", &SA5_access, 512},
117 {0x3211103C, "Smart Array E200i", &SA5_access, 120},
118 {0x3212103C, "Smart Array E200", &SA5_access, 120},
119 {0x3213103C, "Smart Array E200i", &SA5_access, 120},
120 {0x3214103C, "Smart Array E200i", &SA5_access, 120},
121 {0x3215103C, "Smart Array E200i", &SA5_access, 120},
122 {0x3237103C, "Smart Array E500", &SA5_access, 512},
123 {0x323D103C, "Smart Array P700m", &SA5_access, 512},
124 {0xFFFF103C, "Unknown Smart Array", &SA5_access, 120},
125 };
126
127 /* How long to wait (in milliseconds) for board to go into simple mode */
128 #define MAX_CONFIG_WAIT 30000
129 #define MAX_IOCTL_CONFIG_WAIT 1000
130
131 /* define how many times we will try a command because of bus resets */
132 #define MAX_CMD_RETRIES 3
133
134 #define READ_AHEAD 1024
135 #define MAX_CTLR 32
136
137 /* Originally the cciss driver only supported 8 major numbers */
138 #define MAX_CTLR_ORIG 8
139
140 static ctlr_info_t *hba[MAX_CTLR];
141
142 static void do_cciss_request(struct request_queue *q);
143 static irqreturn_t do_cciss_intr(int irq, void *dev_id);
144 static int cciss_open(struct inode *inode, struct file *filep);
145 static int cciss_release(struct inode *inode, struct file *filep);
146 static int cciss_ioctl(struct inode *inode, struct file *filep,
147 unsigned int cmd, unsigned long arg);
148 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
149
150 static int cciss_revalidate(struct gendisk *disk);
151 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
152 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
153 int clear_all);
154
155 static void cciss_read_capacity(int ctlr, int logvol, int withirq,
156 sector_t *total_size, unsigned int *block_size);
157 static void cciss_read_capacity_16(int ctlr, int logvol, int withirq,
158 sector_t *total_size, unsigned int *block_size);
159 static void cciss_geometry_inquiry(int ctlr, int logvol,
160 int withirq, sector_t total_size,
161 unsigned int block_size, InquiryData_struct *inq_buff,
162 drive_info_struct *drv);
163 static void cciss_getgeometry(int cntl_num);
164 static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
165 __u32);
166 static void start_io(ctlr_info_t *h);
167 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
168 unsigned int use_unit_num, unsigned int log_unit,
169 __u8 page_code, unsigned char *scsi3addr, int cmd_type);
170 static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
171 unsigned int use_unit_num, unsigned int log_unit,
172 __u8 page_code, int cmd_type);
173
174 static void fail_all_cmds(unsigned long ctlr);
175
176 #ifdef CONFIG_PROC_FS
177 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
178 int length, int *eof, void *data);
179 static void cciss_procinit(int i);
180 #else
181 static void cciss_procinit(int i)
182 {
183 }
184 #endif /* CONFIG_PROC_FS */
185
186 #ifdef CONFIG_COMPAT
187 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
188 #endif
189
190 static struct block_device_operations cciss_fops = {
191 .owner = THIS_MODULE,
192 .open = cciss_open,
193 .release = cciss_release,
194 .ioctl = cciss_ioctl,
195 .getgeo = cciss_getgeo,
196 #ifdef CONFIG_COMPAT
197 .compat_ioctl = cciss_compat_ioctl,
198 #endif
199 .revalidate_disk = cciss_revalidate,
200 };
201
202 /*
203 * Enqueuing and dequeuing functions for cmdlists.
204 */
205 static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
206 {
207 if (*Qptr == NULL) {
208 *Qptr = c;
209 c->next = c->prev = c;
210 } else {
211 c->prev = (*Qptr)->prev;
212 c->next = (*Qptr);
213 (*Qptr)->prev->next = c;
214 (*Qptr)->prev = c;
215 }
216 }
217
218 static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
219 CommandList_struct *c)
220 {
221 if (c && c->next != c) {
222 if (*Qptr == c)
223 *Qptr = c->next;
224 c->prev->next = c->next;
225 c->next->prev = c->prev;
226 } else {
227 *Qptr = NULL;
228 }
229 return c;
230 }
231
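/*
 * Submission sketch (illustrative; it mirrors how addQ() is used by the
 * ioctl and sendcmd_withirq() paths later in this file): a command is
 * queued under the controller lock and the hardware is kicked via start_io().
 *
 *	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
 *	addQ(&h->reqQ, c);
 *	h->Qdepth++;
 *	start_io(h);
 *	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
 */
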
232 #include "cciss_scsi.c" /* For SCSI tape support */
233
234 #define RAID_UNKNOWN 6
235
236 #ifdef CONFIG_PROC_FS
237
238 /*
239 * Report information about this controller.
240 */
241 #define ENG_GIG 1000000000
242 #define ENG_GIG_FACTOR (ENG_GIG/512)
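/*
 * ENG_GIG_FACTOR is the number of 512-byte sectors per decimal gigabyte
 * (1000000000 / 512 = 1953125), so the /proc output below reports sizes in
 * decimal GB. Illustrative arithmetic: a 2930277168-sector volume prints
 * as 1500.30GB.
 */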
243 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
244 "UNKNOWN"
245 };
246
247 static struct proc_dir_entry *proc_cciss;
248
249 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
250 int length, int *eof, void *data)
251 {
252 off_t pos = 0;
253 off_t len = 0;
254 int size, i, ctlr;
255 ctlr_info_t *h = (ctlr_info_t *) data;
256 drive_info_struct *drv;
257 unsigned long flags;
258 sector_t vol_sz, vol_sz_frac;
259
260 ctlr = h->ctlr;
261
262 /* prevent displaying bogus info during configuration
263 * or deconfiguration of a logical volume
264 */
265 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
266 if (h->busy_configuring) {
267 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
268 return -EBUSY;
269 }
270 h->busy_configuring = 1;
271 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
272
273 size = sprintf(buffer, "%s: HP %s Controller\n"
274 "Board ID: 0x%08lx\n"
275 "Firmware Version: %c%c%c%c\n"
276 "IRQ: %d\n"
277 "Logical drives: %d\n"
278 "Max sectors: %d\n"
279 "Current Q depth: %d\n"
280 "Current # commands on controller: %d\n"
281 "Max Q depth since init: %d\n"
282 "Max # commands on controller since init: %d\n"
283 "Max SG entries since init: %d\n\n",
284 h->devname,
285 h->product_name,
286 (unsigned long)h->board_id,
287 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
288 h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
289 h->num_luns,
290 h->cciss_max_sectors,
291 h->Qdepth, h->commands_outstanding,
292 h->maxQsinceinit, h->max_outstanding, h->maxSG);
293
294 pos += size;
295 len += size;
296 cciss_proc_tape_report(ctlr, buffer, &pos, &len);
297 for (i = 0; i <= h->highest_lun; i++) {
298
299 drv = &h->drv[i];
300 if (drv->heads == 0)
301 continue;
302
303 vol_sz = drv->nr_blocks;
304 vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
305 vol_sz_frac *= 100;
306 sector_div(vol_sz_frac, ENG_GIG_FACTOR);
307
308 if (drv->raid_level > 5)
309 drv->raid_level = RAID_UNKNOWN;
310 size = sprintf(buffer + len, "cciss/c%dd%d:"
311 "\t%4u.%02uGB\tRAID %s\n",
312 ctlr, i, (int)vol_sz, (int)vol_sz_frac,
313 raid_label[drv->raid_level]);
314 pos += size;
315 len += size;
316 }
317
318 *eof = 1;
319 *start = buffer + offset;
320 len -= offset;
321 if (len > length)
322 len = length;
323 h->busy_configuring = 0;
324 return len;
325 }
326
327 static int
328 cciss_proc_write(struct file *file, const char __user *buffer,
329 unsigned long count, void *data)
330 {
331 unsigned char cmd[80];
332 int len;
333 #ifdef CONFIG_CISS_SCSI_TAPE
334 ctlr_info_t *h = (ctlr_info_t *) data;
335 int rc;
336 #endif
337
338 if (count > sizeof(cmd) - 1)
339 return -EINVAL;
340 if (copy_from_user(cmd, buffer, count))
341 return -EFAULT;
342 cmd[count] = '\0';
343 len = strlen(cmd); // above 3 lines ensure safety
344 if (len && cmd[len - 1] == '\n')
345 cmd[--len] = '\0';
346 # ifdef CONFIG_CISS_SCSI_TAPE
347 if (strcmp("engage scsi", cmd) == 0) {
348 rc = cciss_engage_scsi(h->ctlr);
349 if (rc != 0)
350 return -rc;
351 return count;
352 }
353 /* might be nice to have "disengage" too, but it's not
354 safely possible. (only 1 module use count, lock issues.) */
355 # endif
356 return -EINVAL;
357 }
358
359 /*
360 * Get us a file in /proc/cciss that says something about each controller.
361 * Create /proc/cciss if it doesn't exist yet.
362 */
363 static void __devinit cciss_procinit(int i)
364 {
365 struct proc_dir_entry *pde;
366
367 if (proc_cciss == NULL) {
368 proc_cciss = proc_mkdir("cciss", proc_root_driver);
369 if (!proc_cciss)
370 return;
371 }
372
373 pde = create_proc_read_entry(hba[i]->devname,
374 S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
375 proc_cciss, cciss_proc_get_info, hba[i]);
376 pde->write_proc = cciss_proc_write;
377 }
378 #endif /* CONFIG_PROC_FS */
379
380 /*
381 * For operations that cannot sleep, a command block is allocated at init,
382 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
383 * which ones are free or in use. For operations that can wait for kmalloc
384 * to possibly sleep, this routine can be called with get_from_pool set to 0.
385 * cmd_free() MUST then also be called with got_from_pool set to 0.
386 */
387 static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
388 {
389 CommandList_struct *c;
390 int i;
391 u64bit temp64;
392 dma_addr_t cmd_dma_handle, err_dma_handle;
393
394 if (!get_from_pool) {
395 c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
396 sizeof(CommandList_struct), &cmd_dma_handle);
397 if (c == NULL)
398 return NULL;
399 memset(c, 0, sizeof(CommandList_struct));
400
401 c->cmdindex = -1;
402
403 c->err_info = (ErrorInfo_struct *)
404 pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
405 &err_dma_handle);
406
407 if (c->err_info == NULL) {
408 pci_free_consistent(h->pdev,
409 sizeof(CommandList_struct), c, cmd_dma_handle);
410 return NULL;
411 }
412 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
413 } else { /* get it out of the controllers pool */
414
415 do {
416 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
417 if (i == h->nr_cmds)
418 return NULL;
419 } while (test_and_set_bit
420 (i & (BITS_PER_LONG - 1),
421 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
422 #ifdef CCISS_DEBUG
423 printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
424 #endif
425 c = h->cmd_pool + i;
426 memset(c, 0, sizeof(CommandList_struct));
427 cmd_dma_handle = h->cmd_pool_dhandle
428 + i * sizeof(CommandList_struct);
429 c->err_info = h->errinfo_pool + i;
430 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
431 err_dma_handle = h->errinfo_pool_dhandle
432 + i * sizeof(ErrorInfo_struct);
433 h->nr_allocs++;
434
435 c->cmdindex = i;
436 }
437
438 c->busaddr = (__u32) cmd_dma_handle;
439 temp64.val = (__u64) err_dma_handle;
440 c->ErrDesc.Addr.lower = temp64.val32.lower;
441 c->ErrDesc.Addr.upper = temp64.val32.upper;
442 c->ErrDesc.Len = sizeof(ErrorInfo_struct);
443
444 c->ctlr = h->ctlr;
445 return c;
446 }
447
448 /*
449 * Frees a command block that was previously allocated with cmd_alloc().
450 */
451 static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
452 {
453 int i;
454 u64bit temp64;
455
456 if (!got_from_pool) {
457 temp64.val32.lower = c->ErrDesc.Addr.lower;
458 temp64.val32.upper = c->ErrDesc.Addr.upper;
459 pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
460 c->err_info, (dma_addr_t) temp64.val);
461 pci_free_consistent(h->pdev, sizeof(CommandList_struct),
462 c, (dma_addr_t) c->busaddr);
463 } else {
464 i = c - h->cmd_pool;
465 clear_bit(i & (BITS_PER_LONG - 1),
466 h->cmd_pool_bits + (i / BITS_PER_LONG));
467 h->nr_frees++;
468 }
469 }
470
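/*
 * Pairing sketch (illustrative, restating the rule documented above
 * cmd_alloc()): the pool flag passed to cmd_free() must match the one that
 * was passed to cmd_alloc().
 *
 *	c = cmd_alloc(h, 0);	// separate DMA-coherent allocation (ioctl paths)
 *	...
 *	cmd_free(h, c, 0);
 *
 *	c = cmd_alloc(h, 1);	// taken from the preallocated per-controller pool
 *	...
 *	cmd_free(h, c, 1);
 */
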
471 static inline ctlr_info_t *get_host(struct gendisk *disk)
472 {
473 return disk->queue->queuedata;
474 }
475
476 static inline drive_info_struct *get_drv(struct gendisk *disk)
477 {
478 return disk->private_data;
479 }
480
481 /*
482 * Open. Make sure the device is really there.
483 */
484 static int cciss_open(struct inode *inode, struct file *filep)
485 {
486 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
487 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
488
489 #ifdef CCISS_DEBUG
490 printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
491 #endif /* CCISS_DEBUG */
492
493 if (host->busy_initializing || drv->busy_configuring)
494 return -EBUSY;
495 /*
496 * Root is allowed to open raw volume zero even if it's not configured
497 * so array config can still work. Root is also allowed to open any
498 * volume that has a LUN ID, so it can issue an IOCTL to reread the
499 * disk information. I don't think I really like this,
500 * but I'm already using way too many device nodes to claim another one
501 * for "raw controller".
502 */
503 if (drv->heads == 0) {
504 if (iminor(inode) != 0) { /* not node 0? */
505 /* if not node 0 make sure it is a partition = 0 */
506 if (iminor(inode) & 0x0f) {
507 return -ENXIO;
508 /* if it is, make sure we have a LUN ID */
509 } else if (drv->LunID == 0) {
510 return -ENXIO;
511 }
512 }
513 if (!capable(CAP_SYS_ADMIN))
514 return -EPERM;
515 }
516 drv->usage_count++;
517 host->usage_count++;
518 return 0;
519 }
520
521 /*
522 * Close. Sync first.
523 */
524 static int cciss_release(struct inode *inode, struct file *filep)
525 {
526 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
527 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
528
529 #ifdef CCISS_DEBUG
530 printk(KERN_DEBUG "cciss_release %s\n",
531 inode->i_bdev->bd_disk->disk_name);
532 #endif /* CCISS_DEBUG */
533
534 drv->usage_count--;
535 host->usage_count--;
536 return 0;
537 }
538
539 #ifdef CONFIG_COMPAT
540
541 static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
542 {
543 int ret;
544 lock_kernel();
545 ret = cciss_ioctl(f->f_path.dentry->d_inode, f, cmd, arg);
546 unlock_kernel();
547 return ret;
548 }
549
550 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
551 unsigned long arg);
552 static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd,
553 unsigned long arg);
554
555 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
556 {
557 switch (cmd) {
558 case CCISS_GETPCIINFO:
559 case CCISS_GETINTINFO:
560 case CCISS_SETINTINFO:
561 case CCISS_GETNODENAME:
562 case CCISS_SETNODENAME:
563 case CCISS_GETHEARTBEAT:
564 case CCISS_GETBUSTYPES:
565 case CCISS_GETFIRMVER:
566 case CCISS_GETDRIVVER:
567 case CCISS_REVALIDVOLS:
568 case CCISS_DEREGDISK:
569 case CCISS_REGNEWDISK:
570 case CCISS_REGNEWD:
571 case CCISS_RESCANDISK:
572 case CCISS_GETLUNINFO:
573 return do_ioctl(f, cmd, arg);
574
575 case CCISS_PASSTHRU32:
576 return cciss_ioctl32_passthru(f, cmd, arg);
577 case CCISS_BIG_PASSTHRU32:
578 return cciss_ioctl32_big_passthru(f, cmd, arg);
579
580 default:
581 return -ENOIOCTLCMD;
582 }
583 }
584
585 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
586 unsigned long arg)
587 {
588 IOCTL32_Command_struct __user *arg32 =
589 (IOCTL32_Command_struct __user *) arg;
590 IOCTL_Command_struct arg64;
591 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
592 int err;
593 u32 cp;
594
595 err = 0;
596 err |=
597 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
598 sizeof(arg64.LUN_info));
599 err |=
600 copy_from_user(&arg64.Request, &arg32->Request,
601 sizeof(arg64.Request));
602 err |=
603 copy_from_user(&arg64.error_info, &arg32->error_info,
604 sizeof(arg64.error_info));
605 err |= get_user(arg64.buf_size, &arg32->buf_size);
606 err |= get_user(cp, &arg32->buf);
607 arg64.buf = compat_ptr(cp);
608 err |= copy_to_user(p, &arg64, sizeof(arg64));
609
610 if (err)
611 return -EFAULT;
612
613 err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long)p);
614 if (err)
615 return err;
616 err |=
617 copy_in_user(&arg32->error_info, &p->error_info,
618 sizeof(arg32->error_info));
619 if (err)
620 return -EFAULT;
621 return err;
622 }
623
624 static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd,
625 unsigned long arg)
626 {
627 BIG_IOCTL32_Command_struct __user *arg32 =
628 (BIG_IOCTL32_Command_struct __user *) arg;
629 BIG_IOCTL_Command_struct arg64;
630 BIG_IOCTL_Command_struct __user *p =
631 compat_alloc_user_space(sizeof(arg64));
632 int err;
633 u32 cp;
634
635 err = 0;
636 err |=
637 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
638 sizeof(arg64.LUN_info));
639 err |=
640 copy_from_user(&arg64.Request, &arg32->Request,
641 sizeof(arg64.Request));
642 err |=
643 copy_from_user(&arg64.error_info, &arg32->error_info,
644 sizeof(arg64.error_info));
645 err |= get_user(arg64.buf_size, &arg32->buf_size);
646 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
647 err |= get_user(cp, &arg32->buf);
648 arg64.buf = compat_ptr(cp);
649 err |= copy_to_user(p, &arg64, sizeof(arg64));
650
651 if (err)
652 return -EFAULT;
653
654 err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long)p);
655 if (err)
656 return err;
657 err |=
658 copy_in_user(&arg32->error_info, &p->error_info,
659 sizeof(arg32->error_info));
660 if (err)
661 return -EFAULT;
662 return err;
663 }
664 #endif
665
666 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
667 {
668 drive_info_struct *drv = get_drv(bdev->bd_disk);
669
670 if (!drv->cylinders)
671 return -ENXIO;
672
673 geo->heads = drv->heads;
674 geo->sectors = drv->sectors;
675 geo->cylinders = drv->cylinders;
676 return 0;
677 }
678
679 /*
680 * ioctl
681 */
682 static int cciss_ioctl(struct inode *inode, struct file *filep,
683 unsigned int cmd, unsigned long arg)
684 {
685 struct block_device *bdev = inode->i_bdev;
686 struct gendisk *disk = bdev->bd_disk;
687 ctlr_info_t *host = get_host(disk);
688 drive_info_struct *drv = get_drv(disk);
689 int ctlr = host->ctlr;
690 void __user *argp = (void __user *)arg;
691
692 #ifdef CCISS_DEBUG
693 printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
694 #endif /* CCISS_DEBUG */
695
696 switch (cmd) {
697 case CCISS_GETPCIINFO:
698 {
699 cciss_pci_info_struct pciinfo;
700
701 if (!arg)
702 return -EINVAL;
703 pciinfo.domain = pci_domain_nr(host->pdev->bus);
704 pciinfo.bus = host->pdev->bus->number;
705 pciinfo.dev_fn = host->pdev->devfn;
706 pciinfo.board_id = host->board_id;
707 if (copy_to_user
708 (argp, &pciinfo, sizeof(cciss_pci_info_struct)))
709 return -EFAULT;
710 return 0;
711 }
712 case CCISS_GETINTINFO:
713 {
714 cciss_coalint_struct intinfo;
715 if (!arg)
716 return -EINVAL;
717 intinfo.delay =
718 readl(&host->cfgtable->HostWrite.CoalIntDelay);
719 intinfo.count =
720 readl(&host->cfgtable->HostWrite.CoalIntCount);
721 if (copy_to_user
722 (argp, &intinfo, sizeof(cciss_coalint_struct)))
723 return -EFAULT;
724 return 0;
725 }
726 case CCISS_SETINTINFO:
727 {
728 cciss_coalint_struct intinfo;
729 unsigned long flags;
730 int i;
731
732 if (!arg)
733 return -EINVAL;
734 if (!capable(CAP_SYS_ADMIN))
735 return -EPERM;
736 if (copy_from_user
737 (&intinfo, argp, sizeof(cciss_coalint_struct)))
738 return -EFAULT;
739 if ((intinfo.delay == 0) && (intinfo.count == 0))
740 {
741 // printk("cciss_ioctl: delay and count cannot be 0\n");
742 return -EINVAL;
743 }
744 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
745 /* Update the field, and then ring the doorbell */
746 writel(intinfo.delay,
747 &(host->cfgtable->HostWrite.CoalIntDelay));
748 writel(intinfo.count,
749 &(host->cfgtable->HostWrite.CoalIntCount));
750 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
751
752 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
753 if (!(readl(host->vaddr + SA5_DOORBELL)
754 & CFGTBL_ChangeReq))
755 break;
756 /* delay and try again */
757 udelay(1000);
758 }
759 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
760 if (i >= MAX_IOCTL_CONFIG_WAIT)
761 return -EAGAIN;
762 return 0;
763 }
764 case CCISS_GETNODENAME:
765 {
766 NodeName_type NodeName;
767 int i;
768
769 if (!arg)
770 return -EINVAL;
771 for (i = 0; i < 16; i++)
772 NodeName[i] =
773 readb(&host->cfgtable->ServerName[i]);
774 if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
775 return -EFAULT;
776 return 0;
777 }
778 case CCISS_SETNODENAME:
779 {
780 NodeName_type NodeName;
781 unsigned long flags;
782 int i;
783
784 if (!arg)
785 return -EINVAL;
786 if (!capable(CAP_SYS_ADMIN))
787 return -EPERM;
788
789 if (copy_from_user
790 (NodeName, argp, sizeof(NodeName_type)))
791 return -EFAULT;
792
793 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
794
795 /* Update the field, and then ring the doorbell */
796 for (i = 0; i < 16; i++)
797 writeb(NodeName[i],
798 &host->cfgtable->ServerName[i]);
799
800 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
801
802 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
803 if (!(readl(host->vaddr + SA5_DOORBELL)
804 & CFGTBL_ChangeReq))
805 break;
806 /* delay and try again */
807 udelay(1000);
808 }
809 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
810 if (i >= MAX_IOCTL_CONFIG_WAIT)
811 return -EAGAIN;
812 return 0;
813 }
814
815 case CCISS_GETHEARTBEAT:
816 {
817 Heartbeat_type heartbeat;
818
819 if (!arg)
820 return -EINVAL;
821 heartbeat = readl(&host->cfgtable->HeartBeat);
822 if (copy_to_user
823 (argp, &heartbeat, sizeof(Heartbeat_type)))
824 return -EFAULT;
825 return 0;
826 }
827 case CCISS_GETBUSTYPES:
828 {
829 BusTypes_type BusTypes;
830
831 if (!arg)
832 return -EINVAL;
833 BusTypes = readl(&host->cfgtable->BusTypes);
834 if (copy_to_user
835 (argp, &BusTypes, sizeof(BusTypes_type)))
836 return -EFAULT;
837 return 0;
838 }
839 case CCISS_GETFIRMVER:
840 {
841 FirmwareVer_type firmware;
842
843 if (!arg)
844 return -EINVAL;
845 memcpy(firmware, host->firm_ver, 4);
846
847 if (copy_to_user
848 (argp, firmware, sizeof(FirmwareVer_type)))
849 return -EFAULT;
850 return 0;
851 }
852 case CCISS_GETDRIVVER:
853 {
854 DriverVer_type DriverVer = DRIVER_VERSION;
855
856 if (!arg)
857 return -EINVAL;
858
859 if (copy_to_user
860 (argp, &DriverVer, sizeof(DriverVer_type)))
861 return -EFAULT;
862 return 0;
863 }
864
865 case CCISS_REVALIDVOLS:
866 return rebuild_lun_table(host, NULL);
867
868 case CCISS_GETLUNINFO:{
869 LogvolInfo_struct luninfo;
870
871 luninfo.LunID = drv->LunID;
872 luninfo.num_opens = drv->usage_count;
873 luninfo.num_parts = 0;
874 if (copy_to_user(argp, &luninfo,
875 sizeof(LogvolInfo_struct)))
876 return -EFAULT;
877 return 0;
878 }
879 case CCISS_DEREGDISK:
880 return rebuild_lun_table(host, disk);
881
882 case CCISS_REGNEWD:
883 return rebuild_lun_table(host, NULL);
884
885 case CCISS_PASSTHRU:
886 {
887 IOCTL_Command_struct iocommand;
888 CommandList_struct *c;
889 char *buff = NULL;
890 u64bit temp64;
891 unsigned long flags;
892 DECLARE_COMPLETION_ONSTACK(wait);
893
894 if (!arg)
895 return -EINVAL;
896
897 if (!capable(CAP_SYS_RAWIO))
898 return -EPERM;
899
900 if (copy_from_user
901 (&iocommand, argp, sizeof(IOCTL_Command_struct)))
902 return -EFAULT;
903 if ((iocommand.buf_size < 1) &&
904 (iocommand.Request.Type.Direction != XFER_NONE)) {
905 return -EINVAL;
906 }
907 #if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
908 /* Check kmalloc limits */
909 if (iocommand.buf_size > 128000)
910 return -EINVAL;
911 #endif
912 if (iocommand.buf_size > 0) {
913 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
914 if (buff == NULL)
915 return -EFAULT;
916 }
917 if (iocommand.Request.Type.Direction == XFER_WRITE) {
918 /* Copy the data into the buffer we created */
919 if (copy_from_user
920 (buff, iocommand.buf, iocommand.buf_size)) {
921 kfree(buff);
922 return -EFAULT;
923 }
924 } else {
925 memset(buff, 0, iocommand.buf_size);
926 }
927 if ((c = cmd_alloc(host, 0)) == NULL) {
928 kfree(buff);
929 return -ENOMEM;
930 }
931 // Fill in the command type
932 c->cmd_type = CMD_IOCTL_PEND;
933 // Fill in Command Header
934 c->Header.ReplyQueue = 0; // unused in simple mode
935 if (iocommand.buf_size > 0) // buffer to fill
936 {
937 c->Header.SGList = 1;
938 c->Header.SGTotal = 1;
939 } else // no buffers to fill
940 {
941 c->Header.SGList = 0;
942 c->Header.SGTotal = 0;
943 }
944 c->Header.LUN = iocommand.LUN_info;
945 c->Header.Tag.lower = c->busaddr; // use the kernel address of the cmd block for tag
946
947 // Fill in Request block
948 c->Request = iocommand.Request;
949
950 // Fill in the scatter gather information
951 if (iocommand.buf_size > 0) {
952 temp64.val = pci_map_single(host->pdev, buff,
953 iocommand.buf_size,
954 PCI_DMA_BIDIRECTIONAL);
955 c->SG[0].Addr.lower = temp64.val32.lower;
956 c->SG[0].Addr.upper = temp64.val32.upper;
957 c->SG[0].Len = iocommand.buf_size;
958 c->SG[0].Ext = 0; // we are not chaining
959 }
960 c->waiting = &wait;
961
962 /* Put the request on the tail of the request queue */
963 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
964 addQ(&host->reqQ, c);
965 host->Qdepth++;
966 start_io(host);
967 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
968
969 wait_for_completion(&wait);
970
971 /* unlock the buffers from DMA */
972 temp64.val32.lower = c->SG[0].Addr.lower;
973 temp64.val32.upper = c->SG[0].Addr.upper;
974 pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
975 iocommand.buf_size,
976 PCI_DMA_BIDIRECTIONAL);
977
978 /* Copy the error information out */
979 iocommand.error_info = *(c->err_info);
980 if (copy_to_user
981 (argp, &iocommand, sizeof(IOCTL_Command_struct))) {
982 kfree(buff);
983 cmd_free(host, c, 0);
984 return -EFAULT;
985 }
986
987 if (iocommand.Request.Type.Direction == XFER_READ) {
988 /* Copy the data out of the buffer we created */
989 if (copy_to_user
990 (iocommand.buf, buff, iocommand.buf_size)) {
991 kfree(buff);
992 cmd_free(host, c, 0);
993 return -EFAULT;
994 }
995 }
996 kfree(buff);
997 cmd_free(host, c, 0);
998 return 0;
999 }
1000 case CCISS_BIG_PASSTHRU:{
1001 BIG_IOCTL_Command_struct *ioc;
1002 CommandList_struct *c;
1003 unsigned char **buff = NULL;
1004 int *buff_size = NULL;
1005 u64bit temp64;
1006 unsigned long flags;
1007 BYTE sg_used = 0;
1008 int status = 0;
1009 int i;
1010 DECLARE_COMPLETION_ONSTACK(wait);
1011 __u32 left;
1012 __u32 sz;
1013 BYTE __user *data_ptr;
1014
1015 if (!arg)
1016 return -EINVAL;
1017 if (!capable(CAP_SYS_RAWIO))
1018 return -EPERM;
1019 ioc = (BIG_IOCTL_Command_struct *)
1020 kmalloc(sizeof(*ioc), GFP_KERNEL);
1021 if (!ioc) {
1022 status = -ENOMEM;
1023 goto cleanup1;
1024 }
1025 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
1026 status = -EFAULT;
1027 goto cleanup1;
1028 }
1029 if ((ioc->buf_size < 1) &&
1030 (ioc->Request.Type.Direction != XFER_NONE)) {
1031 status = -EINVAL;
1032 goto cleanup1;
1033 }
1034 /* Check kmalloc limits using all SGs */
1035 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
1036 status = -EINVAL;
1037 goto cleanup1;
1038 }
1039 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
1040 status = -EINVAL;
1041 goto cleanup1;
1042 }
1043 buff =
1044 kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
1045 if (!buff) {
1046 status = -ENOMEM;
1047 goto cleanup1;
1048 }
1049 buff_size = kmalloc(MAXSGENTRIES * sizeof(int),
1050 GFP_KERNEL);
1051 if (!buff_size) {
1052 status = -ENOMEM;
1053 goto cleanup1;
1054 }
1055 left = ioc->buf_size;
1056 data_ptr = ioc->buf;
1057 while (left) {
1058 sz = (left >
1059 ioc->malloc_size) ? ioc->
1060 malloc_size : left;
1061 buff_size[sg_used] = sz;
1062 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
1063 if (buff[sg_used] == NULL) {
1064 status = -ENOMEM;
1065 goto cleanup1;
1066 }
1067 if (ioc->Request.Type.Direction == XFER_WRITE) {
1068 if (copy_from_user
1069 (buff[sg_used], data_ptr, sz)) {
1070 status = -ENOMEM;
1071 goto cleanup1;
1072 }
1073 } else {
1074 memset(buff[sg_used], 0, sz);
1075 }
1076 left -= sz;
1077 data_ptr += sz;
1078 sg_used++;
1079 }
1080 if ((c = cmd_alloc(host, 0)) == NULL) {
1081 status = -ENOMEM;
1082 goto cleanup1;
1083 }
1084 c->cmd_type = CMD_IOCTL_PEND;
1085 c->Header.ReplyQueue = 0;
1086
1087 if (ioc->buf_size > 0) {
1088 c->Header.SGList = sg_used;
1089 c->Header.SGTotal = sg_used;
1090 } else {
1091 c->Header.SGList = 0;
1092 c->Header.SGTotal = 0;
1093 }
1094 c->Header.LUN = ioc->LUN_info;
1095 c->Header.Tag.lower = c->busaddr;
1096
1097 c->Request = ioc->Request;
1098 if (ioc->buf_size > 0) {
1099 int i;
1100 for (i = 0; i < sg_used; i++) {
1101 temp64.val =
1102 pci_map_single(host->pdev, buff[i],
1103 buff_size[i],
1104 PCI_DMA_BIDIRECTIONAL);
1105 c->SG[i].Addr.lower =
1106 temp64.val32.lower;
1107 c->SG[i].Addr.upper =
1108 temp64.val32.upper;
1109 c->SG[i].Len = buff_size[i];
1110 c->SG[i].Ext = 0; /* we are not chaining */
1111 }
1112 }
1113 c->waiting = &wait;
1114 /* Put the request on the tail of the request queue */
1115 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1116 addQ(&host->reqQ, c);
1117 host->Qdepth++;
1118 start_io(host);
1119 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1120 wait_for_completion(&wait);
1121 /* unlock the buffers from DMA */
1122 for (i = 0; i < sg_used; i++) {
1123 temp64.val32.lower = c->SG[i].Addr.lower;
1124 temp64.val32.upper = c->SG[i].Addr.upper;
1125 pci_unmap_single(host->pdev,
1126 (dma_addr_t) temp64.val, buff_size[i],
1127 PCI_DMA_BIDIRECTIONAL);
1128 }
1129 /* Copy the error information out */
1130 ioc->error_info = *(c->err_info);
1131 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
1132 cmd_free(host, c, 0);
1133 status = -EFAULT;
1134 goto cleanup1;
1135 }
1136 if (ioc->Request.Type.Direction == XFER_READ) {
1137 /* Copy the data out of the buffer we created */
1138 BYTE __user *ptr = ioc->buf;
1139 for (i = 0; i < sg_used; i++) {
1140 if (copy_to_user
1141 (ptr, buff[i], buff_size[i])) {
1142 cmd_free(host, c, 0);
1143 status = -EFAULT;
1144 goto cleanup1;
1145 }
1146 ptr += buff_size[i];
1147 }
1148 }
1149 cmd_free(host, c, 0);
1150 status = 0;
1151 cleanup1:
1152 if (buff) {
1153 for (i = 0; i < sg_used; i++)
1154 kfree(buff[i]);
1155 kfree(buff);
1156 }
1157 kfree(buff_size);
1158 kfree(ioc);
1159 return status;
1160 }
1161
1162 /* scsi_cmd_ioctl handles these, below, though some are not */
1163 /* very meaningful for cciss. SG_IO is the main one people want. */
1164
1165 case SG_GET_VERSION_NUM:
1166 case SG_SET_TIMEOUT:
1167 case SG_GET_TIMEOUT:
1168 case SG_GET_RESERVED_SIZE:
1169 case SG_SET_RESERVED_SIZE:
1170 case SG_EMULATED_HOST:
1171 case SG_IO:
1172 case SCSI_IOCTL_SEND_COMMAND:
1173 return scsi_cmd_ioctl(filep, disk->queue, disk, cmd, argp);
1174
1175 /* scsi_cmd_ioctl would normally handle these, below, but */
1176 /* they aren't a good fit for cciss, as CD-ROMs are */
1177 /* not supported, and we don't have any bus/target/lun */
1178 /* which we present to the kernel. */
1179
1180 case CDROM_SEND_PACKET:
1181 case CDROMCLOSETRAY:
1182 case CDROMEJECT:
1183 case SCSI_IOCTL_GET_IDLUN:
1184 case SCSI_IOCTL_GET_BUS_NUMBER:
1185 default:
1186 return -ENOTTY;
1187 }
1188 }
1189
1190 static inline void complete_buffers(struct bio *bio, int status)
1191 {
1192 while (bio) {
1193 struct bio *xbh = bio->bi_next;
1194 int nr_sectors = bio_sectors(bio);
1195
1196 bio->bi_next = NULL;
1197 bio_endio(bio, status ? 0 : -EIO);
1198 bio = xbh;
1199 }
1200 }
1201
1202 static void cciss_check_queues(ctlr_info_t *h)
1203 {
1204 int start_queue = h->next_to_run;
1205 int i;
1206
1207 /* check to see if we have maxed out the number of commands that can
1208 * be placed on the queue. If so then exit. We do this check here
1209 * in case the interrupt we serviced was from an ioctl and did not
1210 * free any new commands.
1211 */
1212 if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds)
1213 return;
1214
1215 /* We have room on the queue for more commands. Now we need to queue
1216 * them up. We will also keep track of the next queue to run so
1217 * that every queue gets a chance to be started first.
1218 */
1219 for (i = 0; i < h->highest_lun + 1; i++) {
1220 int curr_queue = (start_queue + i) % (h->highest_lun + 1);
1221 /* make sure the disk has been added and the drive is real
1222 * because this can be called from the middle of init_one.
1223 */
1224 if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
1225 continue;
1226 blk_start_queue(h->gendisk[curr_queue]->queue);
1227
1228 /* check to see if we have maxed out the number of commands
1229 * that can be placed on the queue.
1230 */
1231 if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) {
1232 if (curr_queue == start_queue) {
1233 h->next_to_run =
1234 (start_queue + 1) % (h->highest_lun + 1);
1235 break;
1236 } else {
1237 h->next_to_run = curr_queue;
1238 break;
1239 }
1240 } else {
1241 curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
1242 }
1243 }
1244 }
1245
1246 static void cciss_softirq_done(struct request *rq)
1247 {
1248 CommandList_struct *cmd = rq->completion_data;
1249 ctlr_info_t *h = hba[cmd->ctlr];
1250 unsigned long flags;
1251 u64bit temp64;
1252 int i, ddir;
1253
1254 if (cmd->Request.Type.Direction == XFER_READ)
1255 ddir = PCI_DMA_FROMDEVICE;
1256 else
1257 ddir = PCI_DMA_TODEVICE;
1258
1259 /* command did not need to be retried */
1260 /* unmap the DMA mapping for all the scatter gather elements */
1261 for (i = 0; i < cmd->Header.SGList; i++) {
1262 temp64.val32.lower = cmd->SG[i].Addr.lower;
1263 temp64.val32.upper = cmd->SG[i].Addr.upper;
1264 pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
1265 }
1266
1267 complete_buffers(rq->bio, (rq->errors == 0));
1268
1269 if (blk_fs_request(rq)) {
1270 const int rw = rq_data_dir(rq);
1271
1272 disk_stat_add(rq->rq_disk, sectors[rw], rq->nr_sectors);
1273 }
1274
1275 #ifdef CCISS_DEBUG
1276 printk("Done with %p\n", rq);
1277 #endif /* CCISS_DEBUG */
1278
1279 add_disk_randomness(rq->rq_disk);
1280 spin_lock_irqsave(&h->lock, flags);
1281 end_that_request_last(rq, (rq->errors == 0));
1282 cmd_free(h, cmd, 1);
1283 cciss_check_queues(h);
1284 spin_unlock_irqrestore(&h->lock, flags);
1285 }
1286
1287 /* This function will check the usage_count of the drive to be updated/added.
1288 * If the usage_count is zero then the drive information will be updated and
1289 * the disk will be re-registered with the kernel. If not then it will be
1290 * left alone for the next reboot. The exception to this is disk 0 which
1291 * will always be left registered with the kernel since it is also the
1292 * controller node. Any changes to disk 0 will show up on the next
1293 * reboot.
1294 */
1295 static void cciss_update_drive_info(int ctlr, int drv_index)
1296 {
1297 ctlr_info_t *h = hba[ctlr];
1298 struct gendisk *disk;
1299 InquiryData_struct *inq_buff = NULL;
1300 unsigned int block_size;
1301 sector_t total_size;
1302 unsigned long flags = 0;
1303 int ret = 0;
1304
1305 /* if the disk already exists then deregister it before proceeding */
1306 if (h->drv[drv_index].raid_level != -1) {
1307 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1308 h->drv[drv_index].busy_configuring = 1;
1309 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1310 ret = deregister_disk(h->gendisk[drv_index],
1311 &h->drv[drv_index], 0);
1312 h->drv[drv_index].busy_configuring = 0;
1313 }
1314
1315 /* If the disk is in use return */
1316 if (ret)
1317 return;
1318
1319 /* Get information about the disk and modify the driver structure */
1320 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
1321 if (inq_buff == NULL)
1322 goto mem_msg;
1323
1324 /* testing to see if 16-byte CDBs are already being used */
1325 if (h->cciss_read == CCISS_READ_16) {
1326 cciss_read_capacity_16(h->ctlr, drv_index, 1,
1327 &total_size, &block_size);
1328 goto geo_inq;
1329 }
1330
1331 cciss_read_capacity(ctlr, drv_index, 1,
1332 &total_size, &block_size);
1333
1334 /* if read_capacity returns all F's this volume is >2TB in size */
1335 /* so we switch to 16-byte CDB's for all read/write ops */
1336 if (total_size == 0xFFFFFFFFULL) {
1337 cciss_read_capacity_16(ctlr, drv_index, 1,
1338 &total_size, &block_size);
1339 h->cciss_read = CCISS_READ_16;
1340 h->cciss_write = CCISS_WRITE_16;
1341 } else {
1342 h->cciss_read = CCISS_READ_10;
1343 h->cciss_write = CCISS_WRITE_10;
1344 }
1345 geo_inq:
1346 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
1347 inq_buff, &h->drv[drv_index]);
1348
1349 ++h->num_luns;
1350 disk = h->gendisk[drv_index];
1351 set_capacity(disk, h->drv[drv_index].nr_blocks);
1352
1353 /* if it's the controller it's already added */
1354 if (drv_index) {
1355 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1356 sprintf(disk->disk_name, "cciss/c%dd%d", ctlr, drv_index);
1357 disk->major = h->major;
1358 disk->first_minor = drv_index << NWD_SHIFT;
1359 disk->fops = &cciss_fops;
1360 disk->private_data = &h->drv[drv_index];
1361
1362 /* Set up queue information */
1363 disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
1364 blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
1365
1366 /* This is a hardware imposed limit. */
1367 blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
1368
1369 /* This is a limit in the driver and could be eliminated. */
1370 blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
1371
1372 blk_queue_max_sectors(disk->queue, h->cciss_max_sectors);
1373
1374 blk_queue_softirq_done(disk->queue, cciss_softirq_done);
1375
1376 disk->queue->queuedata = hba[ctlr];
1377
1378 blk_queue_hardsect_size(disk->queue,
1379 hba[ctlr]->drv[drv_index].block_size);
1380
1381 h->drv[drv_index].queue = disk->queue;
1382 add_disk(disk);
1383 }
1384
1385 freeret:
1386 kfree(inq_buff);
1387 return;
1388 mem_msg:
1389 printk(KERN_ERR "cciss: out of memory\n");
1390 goto freeret;
1391 }
1392
1393 /* This function will find the first index of the controllers drive array
1394 * that has a -1 for the raid_level and will return that index. This is
1395 * where new drives will be added. If the index to be returned is greater
1396 * than the highest_lun index for the controller then highest_lun is set
1397 * to this new index. If there are no available indexes then -1 is returned.
1398 */
1399 static int cciss_find_free_drive_index(int ctlr)
1400 {
1401 int i;
1402
1403 for (i = 0; i < CISS_MAX_LUN; i++) {
1404 if (hba[ctlr]->drv[i].raid_level == -1) {
1405 if (i > hba[ctlr]->highest_lun)
1406 hba[ctlr]->highest_lun = i;
1407 return i;
1408 }
1409 }
1410 return -1;
1411 }
1412
1413 /* This function will add and remove logical drives from the Logical
1414 * drive array of the controller and maintain persistence of ordering
1415 * so that mount points are preserved until the next reboot. This allows
1416 * for the removal of logical drives in the middle of the drive array
1417 * without a re-ordering of those drives.
1418 * INPUT
1419 * h = The controller to perform the operations on
1420 * del_disk = The disk to remove if specified. If the value given
1421 * is NULL then no disk is removed.
1422 */
1423 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1424 {
1425 int ctlr = h->ctlr;
1426 int num_luns;
1427 ReportLunData_struct *ld_buff = NULL;
1428 drive_info_struct *drv = NULL;
1429 int return_code;
1430 int listlength = 0;
1431 int i;
1432 int drv_found;
1433 int drv_index = 0;
1434 __u32 lunid = 0;
1435 unsigned long flags;
1436
1437 /* Set busy_configuring flag for this operation */
1438 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1439 if (h->busy_configuring) {
1440 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1441 return -EBUSY;
1442 }
1443 h->busy_configuring = 1;
1444
1445 /* if del_disk is NULL then we are being called to add a new disk
1446 * and update the logical drive table. If it is not NULL then
1447 * we will check if the disk is in use or not.
1448 */
1449 if (del_disk != NULL) {
1450 drv = get_drv(del_disk);
1451 drv->busy_configuring = 1;
1452 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1453 return_code = deregister_disk(del_disk, drv, 1);
1454 drv->busy_configuring = 0;
1455 h->busy_configuring = 0;
1456 return return_code;
1457 } else {
1458 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1459 if (!capable(CAP_SYS_RAWIO))
1460 return -EPERM;
1461
1462 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1463 if (ld_buff == NULL)
1464 goto mem_msg;
1465
1466 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1467 sizeof(ReportLunData_struct), 0,
1468 0, 0, TYPE_CMD);
1469
1470 if (return_code == IO_OK) {
1471 listlength =
1472 be32_to_cpu(*(__be32 *) ld_buff->LUNListLength);
1473 } else { /* reading number of logical volumes failed */
1474 printk(KERN_WARNING "cciss: report logical volume"
1475 " command failed\n");
1476 listlength = 0;
1477 goto freeret;
1478 }
1479
1480 num_luns = listlength / 8; /* 8 bytes per entry */
1481 if (num_luns > CISS_MAX_LUN) {
1482 num_luns = CISS_MAX_LUN;
1483 printk(KERN_WARNING "cciss: more luns configured"
1484 " on controller than can be handled by"
1485 " this driver.\n");
1486 }
1487
1488 /* Compare the controller's drive array to the driver's drive array.
1489 * Check for updates in the drive information and any new drives
1490 * on the controller.
1491 */
1492 for (i = 0; i < num_luns; i++) {
1493 int j;
1494
1495 drv_found = 0;
1496
1497 lunid = (0xff &
1498 (unsigned int)(ld_buff->LUN[i][3])) << 24;
1499 lunid |= (0xff &
1500 (unsigned int)(ld_buff->LUN[i][2])) << 16;
1501 lunid |= (0xff &
1502 (unsigned int)(ld_buff->LUN[i][1])) << 8;
1503 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
1504
1505 /* Find if the LUN is already in the drive array
1506 * of the controller. If so then update its info,
1507 * provided it is not in use. If it does not exist then find
1508 * the first free index and add it.
1509 */
1510 for (j = 0; j <= h->highest_lun; j++) {
1511 if (h->drv[j].LunID == lunid) {
1512 drv_index = j;
1513 drv_found = 1;
1514 }
1515 }
1516
1517 /* check if the drive was found already in the array */
1518 if (!drv_found) {
1519 drv_index = cciss_find_free_drive_index(ctlr);
1520 if (drv_index == -1)
1521 goto freeret;
1522
1523 /* Check if the gendisk needs to be allocated */
1524 if (!h->gendisk[drv_index]){
1525 h->gendisk[drv_index] = alloc_disk(1 << NWD_SHIFT);
1526 if (!h->gendisk[drv_index]){
1527 printk(KERN_ERR "cciss: could not allocate new disk %d\n", drv_index);
1528 goto mem_msg;
1529 }
1530 }
1531 }
1532 h->drv[drv_index].LunID = lunid;
1533 cciss_update_drive_info(ctlr, drv_index);
1534 } /* end for */
1535 } /* end else */
1536
1537 freeret:
1538 kfree(ld_buff);
1539 h->busy_configuring = 0;
1540 /* We return -1 here to tell the ACU that we have registered/updated
1541 * all of the drives that we can and to keep it from calling us
1542 * additional times.
1543 */
1544 return -1;
1545 mem_msg:
1546 printk(KERN_ERR "cciss: out of memory\n");
1547 goto freeret;
1548 }
1549
1550 /* This function will deregister the disk and its queue from the
1551 * kernel. It must be called with the controller lock held and the
1552 * drv structure's busy_configuring flag set. Its parameters are:
1553 *
1554 * disk = This is the disk to be deregistered
1555 * drv = This is the drive_info_struct associated with the disk to be
1556 * deregistered. It contains information about the disk used
1557 * by the driver.
1558 * clear_all = This flag determines whether or not the disk information
1559 * is going to be completely cleared out and the highest_lun
1560 * reset. Sometimes we want to clear out information about
1561 * the disk in preparation for re-adding it. In this case
1562 * the highest_lun should be left unchanged and the LunID
1563 * should not be cleared.
1564 */
1565 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1566 int clear_all)
1567 {
1568 int i;
1569 ctlr_info_t *h = get_host(disk);
1570
1571 if (!capable(CAP_SYS_RAWIO))
1572 return -EPERM;
1573
1574 /* make sure the logical volume is NOT in use */
1575 if (clear_all || (h->gendisk[0] == disk)) {
1576 if (drv->usage_count > 1)
1577 return -EBUSY;
1578 } else if (drv->usage_count > 0)
1579 return -EBUSY;
1580
1581 /* invalidate the devices and deregister the disk. If it is disk
1582 * zero, do not deregister it but just zero out its values. This
1583 * allows us to delete disk zero but keep the controller registered.
1584 */
1585 if (h->gendisk[0] != disk) {
1586 if (disk) {
1587 struct request_queue *q = disk->queue;
1588 if (disk->flags & GENHD_FL_UP)
1589 del_gendisk(disk);
1590 if (q) {
1591 blk_cleanup_queue(q);
1592 /* Set drv->queue to NULL so that we do not try
1593 * to call blk_start_queue on this queue in the
1594 * interrupt handler
1595 */
1596 drv->queue = NULL;
1597 }
1598 /* If clear_all is set then we are deleting the logical
1599 * drive, not just refreshing its info. For drives
1600 * other than disk 0 we will call put_disk. We do not
1601 * do this for disk 0 as we need it to be able to
1602 * configure the controller.
1603 */
1604 if (clear_all){
1605 /* This isn't pretty, but we need to find the
1606 * disk in our array and NULL out the pointer.
1607 * This is so that we will call alloc_disk if
1608 * this index is used again later.
1609 */
1610 for (i=0; i < CISS_MAX_LUN; i++){
1611 if(h->gendisk[i] == disk){
1612 h->gendisk[i] = NULL;
1613 break;
1614 }
1615 }
1616 put_disk(disk);
1617 }
1618 }
1619 } else {
1620 set_capacity(disk, 0);
1621 }
1622
1623 --h->num_luns;
1624 /* zero out the disk size info */
1625 drv->nr_blocks = 0;
1626 drv->block_size = 0;
1627 drv->heads = 0;
1628 drv->sectors = 0;
1629 drv->cylinders = 0;
1630 drv->raid_level = -1; /* This can be used as a flag variable to
1631 * indicate that this element of the drive
1632 * array is free.
1633 */
1634
1635 if (clear_all) {
1636 /* check to see if it was the last disk */
1637 if (drv == h->drv + h->highest_lun) {
1638 /* if so, find the new highest lun */
1639 int i, newhighest = -1;
1640 for (i = 0; i < h->highest_lun; i++) {
1641 /* if the disk has size > 0, it is available */
1642 if (h->drv[i].heads)
1643 newhighest = i;
1644 }
1645 h->highest_lun = newhighest;
1646 }
1647
1648 drv->LunID = 0;
1649 }
1650 return 0;
1651 }
1652
1653 static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
1654 1: address logical volume log_unit,
1655 2: periph device address is scsi3addr */
1656 unsigned int log_unit, __u8 page_code,
1657 unsigned char *scsi3addr, int cmd_type)
1658 {
1659 ctlr_info_t *h = hba[ctlr];
1660 u64bit buff_dma_handle;
1661 int status = IO_OK;
1662
1663 c->cmd_type = CMD_IOCTL_PEND;
1664 c->Header.ReplyQueue = 0;
1665 if (buff != NULL) {
1666 c->Header.SGList = 1;
1667 c->Header.SGTotal = 1;
1668 } else {
1669 c->Header.SGList = 0;
1670 c->Header.SGTotal = 0;
1671 }
1672 c->Header.Tag.lower = c->busaddr;
1673
1674 c->Request.Type.Type = cmd_type;
1675 if (cmd_type == TYPE_CMD) {
1676 switch (cmd) {
1677 case CISS_INQUIRY:
1678 /* If the logical unit number is 0 then this is going
1679 to the controller, so it's a physical command:
1680 mode = 0 target = 0. So we have nothing to write.
1681 otherwise, if use_unit_num == 1,
1682 mode = 1(volume set addressing) target = LUNID
1683 otherwise, if use_unit_num == 2,
1684 mode = 0(periph dev addr) target = scsi3addr */
1685 if (use_unit_num == 1) {
1686 c->Header.LUN.LogDev.VolId =
1687 h->drv[log_unit].LunID;
1688 c->Header.LUN.LogDev.Mode = 1;
1689 } else if (use_unit_num == 2) {
1690 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr,
1691 8);
1692 c->Header.LUN.LogDev.Mode = 0;
1693 }
1694 /* are we trying to read a vital product page */
1695 if (page_code != 0) {
1696 c->Request.CDB[1] = 0x01;
1697 c->Request.CDB[2] = page_code;
1698 }
1699 c->Request.CDBLen = 6;
1700 c->Request.Type.Attribute = ATTR_SIMPLE;
1701 c->Request.Type.Direction = XFER_READ;
1702 c->Request.Timeout = 0;
1703 c->Request.CDB[0] = CISS_INQUIRY;
1704 c->Request.CDB[4] = size & 0xFF;
1705 break;
1706 case CISS_REPORT_LOG:
1707 case CISS_REPORT_PHYS:
1708 /* Talking to the controller, so it's a physical command:
1709 mode = 00 target = 0. Nothing to write.
1710 */
1711 c->Request.CDBLen = 12;
1712 c->Request.Type.Attribute = ATTR_SIMPLE;
1713 c->Request.Type.Direction = XFER_READ;
1714 c->Request.Timeout = 0;
1715 c->Request.CDB[0] = cmd;
1716 c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
1717 c->Request.CDB[7] = (size >> 16) & 0xFF;
1718 c->Request.CDB[8] = (size >> 8) & 0xFF;
1719 c->Request.CDB[9] = size & 0xFF;
1720 break;
1721
1722 case CCISS_READ_CAPACITY:
1723 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1724 c->Header.LUN.LogDev.Mode = 1;
1725 c->Request.CDBLen = 10;
1726 c->Request.Type.Attribute = ATTR_SIMPLE;
1727 c->Request.Type.Direction = XFER_READ;
1728 c->Request.Timeout = 0;
1729 c->Request.CDB[0] = cmd;
1730 break;
1731 case CCISS_READ_CAPACITY_16:
1732 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1733 c->Header.LUN.LogDev.Mode = 1;
1734 c->Request.CDBLen = 16;
1735 c->Request.Type.Attribute = ATTR_SIMPLE;
1736 c->Request.Type.Direction = XFER_READ;
1737 c->Request.Timeout = 0;
1738 c->Request.CDB[0] = cmd;
1739 c->Request.CDB[1] = 0x10;
1740 c->Request.CDB[10] = (size >> 24) & 0xFF;
1741 c->Request.CDB[11] = (size >> 16) & 0xFF;
1742 c->Request.CDB[12] = (size >> 8) & 0xFF;
1743 c->Request.CDB[13] = size & 0xFF;
1744 c->Request.Timeout = 0;
1745 c->Request.CDB[0] = cmd;
1746 break;
1747 case CCISS_CACHE_FLUSH:
1748 c->Request.CDBLen = 12;
1749 c->Request.Type.Attribute = ATTR_SIMPLE;
1750 c->Request.Type.Direction = XFER_WRITE;
1751 c->Request.Timeout = 0;
1752 c->Request.CDB[0] = BMIC_WRITE;
1753 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
1754 break;
1755 default:
1756 printk(KERN_WARNING
1757 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
1758 return IO_ERROR;
1759 }
1760 } else if (cmd_type == TYPE_MSG) {
1761 switch (cmd) {
1762 case 0: /* ABORT message */
1763 c->Request.CDBLen = 12;
1764 c->Request.Type.Attribute = ATTR_SIMPLE;
1765 c->Request.Type.Direction = XFER_WRITE;
1766 c->Request.Timeout = 0;
1767 c->Request.CDB[0] = cmd; /* abort */
1768 c->Request.CDB[1] = 0; /* abort a command */
1769 /* buff contains the tag of the command to abort */
1770 memcpy(&c->Request.CDB[4], buff, 8);
1771 break;
1772 case 1: /* RESET message */
1773 c->Request.CDBLen = 12;
1774 c->Request.Type.Attribute = ATTR_SIMPLE;
1775 c->Request.Type.Direction = XFER_WRITE;
1776 c->Request.Timeout = 0;
1777 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
1778 c->Request.CDB[0] = cmd; /* reset */
1779 c->Request.CDB[1] = 0x04; /* reset a LUN */
1780 break;
1781 case 3: /* No-Op message */
1782 c->Request.CDBLen = 1;
1783 c->Request.Type.Attribute = ATTR_SIMPLE;
1784 c->Request.Type.Direction = XFER_WRITE;
1785 c->Request.Timeout = 0;
1786 c->Request.CDB[0] = cmd;
1787 break;
1788 default:
1789 printk(KERN_WARNING
1790 "cciss%d: unknown message type %d\n", ctlr, cmd);
1791 return IO_ERROR;
1792 }
1793 } else {
1794 printk(KERN_WARNING
1795 "cciss%d: unknown command type %d\n", ctlr, cmd_type);
1796 return IO_ERROR;
1797 }
1798 /* Fill in the scatter gather information */
1799 if (size > 0) {
1800 buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
1801 buff, size,
1802 PCI_DMA_BIDIRECTIONAL);
1803 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
1804 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
1805 c->SG[0].Len = size;
1806 c->SG[0].Ext = 0; /* we are not chaining */
1807 }
1808 return status;
1809 }
1810
1811 static int sendcmd_withirq(__u8 cmd,
1812 int ctlr,
1813 void *buff,
1814 size_t size,
1815 unsigned int use_unit_num,
1816 unsigned int log_unit, __u8 page_code, int cmd_type)
1817 {
1818 ctlr_info_t *h = hba[ctlr];
1819 CommandList_struct *c;
1820 u64bit buff_dma_handle;
1821 unsigned long flags;
1822 int return_status;
1823 DECLARE_COMPLETION_ONSTACK(wait);
1824
1825 if ((c = cmd_alloc(h, 0)) == NULL)
1826 return -ENOMEM;
1827 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1828 log_unit, page_code, NULL, cmd_type);
1829 if (return_status != IO_OK) {
1830 cmd_free(h, c, 0);
1831 return return_status;
1832 }
1833 resend_cmd2:
1834 c->waiting = &wait;
1835
1836 /* Put the request on the tail of the queue and send it */
1837 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1838 addQ(&h->reqQ, c);
1839 h->Qdepth++;
1840 start_io(h);
1841 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1842
1843 wait_for_completion(&wait);
1844
1845 if (c->err_info->CommandStatus != 0) { /* an error has occurred */
1846 switch (c->err_info->CommandStatus) {
1847 case CMD_TARGET_STATUS:
1848 printk(KERN_WARNING "cciss: cmd %p has "
1849 " completed with errors\n", c);
1850 if (c->err_info->ScsiStatus) {
1851 printk(KERN_WARNING "cciss: cmd %p "
1852 "has SCSI Status = %x\n",
1853 c, c->err_info->ScsiStatus);
1854 }
1855
1856 break;
1857 case CMD_DATA_UNDERRUN:
1858 case CMD_DATA_OVERRUN:
1859 /* expected for inquiry and report lun commands */
1860 break;
1861 case CMD_INVALID:
1862 printk(KERN_WARNING "cciss: Cmd %p is "
1863 "reported invalid\n", c);
1864 return_status = IO_ERROR;
1865 break;
1866 case CMD_PROTOCOL_ERR:
1867 printk(KERN_WARNING "cciss: cmd %p has "
1868 "protocol error \n", c);
1869 return_status = IO_ERROR;
1870 break;
1871 case CMD_HARDWARE_ERR:
1872 printk(KERN_WARNING "cciss: cmd %p had "
1873 " hardware error\n", c);
1874 return_status = IO_ERROR;
1875 break;
1876 case CMD_CONNECTION_LOST:
1877 printk(KERN_WARNING "cciss: cmd %p had "
1878 "connection lost\n", c);
1879 return_status = IO_ERROR;
1880 break;
1881 case CMD_ABORTED:
1882 printk(KERN_WARNING "cciss: cmd %p was "
1883 "aborted\n", c);
1884 return_status = IO_ERROR;
1885 break;
1886 case CMD_ABORT_FAILED:
1887 printk(KERN_WARNING "cciss: cmd %p reports "
1888 "abort failed\n", c);
1889 return_status = IO_ERROR;
1890 break;
1891 case CMD_UNSOLICITED_ABORT:
1892 printk(KERN_WARNING
1893 "cciss%d: unsolicited abort %p\n", ctlr, c);
1894 if (c->retry_count < MAX_CMD_RETRIES) {
1895 printk(KERN_WARNING
1896 "cciss%d: retrying %p\n", ctlr, c);
1897 c->retry_count++;
1898 /* erase the old error information */
1899 memset(c->err_info, 0,
1900 sizeof(ErrorInfo_struct));
1901 return_status = IO_OK;
1902 INIT_COMPLETION(wait);
1903 goto resend_cmd2;
1904 }
1905 return_status = IO_ERROR;
1906 break;
1907 default:
1908 printk(KERN_WARNING "cciss: cmd %p returned "
1909 "unknown status %x\n", c,
1910 c->err_info->CommandStatus);
1911 return_status = IO_ERROR;
1912 }
1913 }
1914 /* unlock the buffers from DMA */
1915 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
1916 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
1917 pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
1918 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
1919 cmd_free(h, c, 0);
1920 return return_status;
1921 }
1922
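/*
 * Query the controller (CISS inquiry, vendor page 0xC1) for the geometry
 * of one logical volume and fill in the drive_info_struct.  Volumes that
 * do not report a geometry fall back to 255 heads and 32 sectors per
 * track, with the cylinder count derived from the reported capacity.
 */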
1923 static void cciss_geometry_inquiry(int ctlr, int logvol,
1924 int withirq, sector_t total_size,
1925 unsigned int block_size,
1926 InquiryData_struct *inq_buff,
1927 drive_info_struct *drv)
1928 {
1929 int return_code;
1930 unsigned long t;
1931
1932 memset(inq_buff, 0, sizeof(InquiryData_struct));
1933 if (withirq)
1934 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
1935 inq_buff, sizeof(*inq_buff), 1,
1936 logvol, 0xC1, TYPE_CMD);
1937 else
1938 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
1939 sizeof(*inq_buff), 1, logvol, 0xC1, NULL,
1940 TYPE_CMD);
1941 if (return_code == IO_OK) {
1942 if (inq_buff->data_byte[8] == 0xFF) {
1943 printk(KERN_WARNING
1944 "cciss: reading geometry failed, volume "
1945 "does not support reading geometry\n");
1946 drv->heads = 255;
1947 drv->sectors = 32; // Sectors per track
1948 drv->cylinders = total_size + 1;
1949 drv->raid_level = RAID_UNKNOWN;
1950 } else {
1951 drv->heads = inq_buff->data_byte[6];
1952 drv->sectors = inq_buff->data_byte[7];
1953 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
1954 drv->cylinders += inq_buff->data_byte[5];
1955 drv->raid_level = inq_buff->data_byte[8];
1956 }
1957 drv->block_size = block_size;
1958 drv->nr_blocks = total_size + 1;
1959 t = drv->heads * drv->sectors;
1960 if (t > 1) {
1961 sector_t real_size = total_size + 1;
1962 unsigned long rem = sector_div(real_size, t);
1963 if (rem)
1964 real_size++;
1965 drv->cylinders = real_size;
1966 }
1967 } else { /* Get geometry failed */
1968 printk(KERN_WARNING "cciss: reading geometry failed\n");
1969 }
1970 printk(KERN_INFO " heads=%d, sectors=%d, cylinders=%d\n\n",
1971 drv->heads, drv->sectors, drv->cylinders);
1972 }
1973
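/*
 * Issue a READ CAPACITY (10) to a logical volume and return the highest
 * addressable block and the block size.  On failure the size is reported
 * as zero and the block size defaults to BLOCK_SIZE.
 */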
1974 static void
1975 cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
1976 unsigned int *block_size)
1977 {
1978 ReadCapdata_struct *buf;
1979 int return_code;
1980
1981 buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
1982 if (!buf) {
1983 printk(KERN_WARNING "cciss: out of memory\n");
1984 return;
1985 }
1986
1987 if (withirq)
1988 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
1989 ctlr, buf, sizeof(ReadCapdata_struct),
1990 1, logvol, 0, TYPE_CMD);
1991 else
1992 return_code = sendcmd(CCISS_READ_CAPACITY,
1993 ctlr, buf, sizeof(ReadCapdata_struct),
1994 1, logvol, 0, NULL, TYPE_CMD);
1995 if (return_code == IO_OK) {
1996 *total_size = be32_to_cpu(*(__be32 *) buf->total_size);
1997 *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
1998 } else { /* read capacity command failed */
1999 printk(KERN_WARNING "cciss: read capacity failed\n");
2000 *total_size = 0;
2001 *block_size = BLOCK_SIZE;
2002 }
2003 if (*total_size != 0)
2004 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2005 (unsigned long long)*total_size+1, *block_size);
2006 kfree(buf);
2007 }
2008
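/*
 * 16-byte READ CAPACITY variant, used for logical volumes larger than
 * 2TB that cannot be described by the 10-byte command.
 */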
2009 static void
2010 cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size, unsigned int *block_size)
2011 {
2012 ReadCapdata_struct_16 *buf;
2013 int return_code;
2014
2015 buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
2016 if (!buf) {
2017 printk(KERN_WARNING "cciss: out of memory\n");
2018 return;
2019 }
2020
2021 if (withirq) {
2022 return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
2023 ctlr, buf, sizeof(ReadCapdata_struct_16),
2024 1, logvol, 0, TYPE_CMD);
2025 }
2026 else {
2027 return_code = sendcmd(CCISS_READ_CAPACITY_16,
2028 ctlr, buf, sizeof(ReadCapdata_struct_16),
2029 1, logvol, 0, NULL, TYPE_CMD);
2030 }
2031 if (return_code == IO_OK) {
2032 *total_size = be64_to_cpu(*(__be64 *) buf->total_size);
2033 *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
2034 } else { /* read capacity command failed */
2035 printk(KERN_WARNING "cciss: read capacity failed\n");
2036 *total_size = 0;
2037 *block_size = BLOCK_SIZE;
2038 }
2039 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2040 (unsigned long long)*total_size+1, *block_size);
2041 kfree(buf);
2042 }
2043
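/*
 * Re-read the capacity and geometry of a logical drive (for example after
 * it has been expanded) and update the request queue and gendisk to match.
 */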
2044 static int cciss_revalidate(struct gendisk *disk)
2045 {
2046 ctlr_info_t *h = get_host(disk);
2047 drive_info_struct *drv = get_drv(disk);
2048 int logvol;
2049 int FOUND = 0;
2050 unsigned int block_size;
2051 sector_t total_size;
2052 InquiryData_struct *inq_buff = NULL;
2053
2054 for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
2055 if (h->drv[logvol].LunID == drv->LunID) {
2056 FOUND = 1;
2057 break;
2058 }
2059 }
2060
2061 if (!FOUND)
2062 return 1;
2063
2064 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
2065 if (inq_buff == NULL) {
2066 printk(KERN_WARNING "cciss: out of memory\n");
2067 return 1;
2068 }
2069 if (h->cciss_read == CCISS_READ_10) {
2070 cciss_read_capacity(h->ctlr, logvol, 1,
2071 &total_size, &block_size);
2072 } else {
2073 cciss_read_capacity_16(h->ctlr, logvol, 1,
2074 &total_size, &block_size);
2075 }
2076 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
2077 inq_buff, drv);
2078
2079 blk_queue_hardsect_size(drv->queue, drv->block_size);
2080 set_capacity(disk, drv->nr_blocks);
2081
2082 kfree(inq_buff);
2083 return 0;
2084 }
2085
2086 /*
2087 * Wait polling for a command to complete.
2088 * The memory mapped FIFO is polled for the completion.
2089 * Used only at init time, interrupts from the HBA are disabled.
2090 */
2091 static unsigned long pollcomplete(int ctlr)
2092 {
2093 unsigned long done;
2094 int i;
2095
2096 /* Wait (up to 20 seconds) for a command to complete */
2097
2098 for (i = 20 * HZ; i > 0; i--) {
2099 done = hba[ctlr]->access.command_completed(hba[ctlr]);
2100 if (done == FIFO_EMPTY)
2101 schedule_timeout_uninterruptible(1);
2102 else
2103 return done;
2104 }
2105 /* Invalid address to tell caller we ran out of time */
2106 return 1;
2107 }
2108
2109 static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
2110 {
2111 /* We get in here if sendcmd() is polling for completions
2112 and gets some command back that it wasn't expecting --
2113 something other than that which it just sent down.
2114 Ordinarily, that shouldn't happen, but it can happen when
2115 the scsi tape stuff gets into error handling mode, and
2116 starts using sendcmd() to try to abort commands and
2117 reset tape drives. In that case, sendcmd may pick up
2118 completions of commands that were sent to logical drives
2119 through the block i/o system, or cciss ioctls completing, etc.
2120 In that case, we need to save those completions for later
2121 processing by the interrupt handler.
2122 */
2123
2124 #ifdef CONFIG_CISS_SCSI_TAPE
2125 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
2126
2127 /* If it's not the scsi tape stuff doing error handling (abort */
2128 /* or reset), then we don't expect anything weird. */
2129 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
2130 #endif
2131 printk(KERN_WARNING "cciss cciss%d: SendCmd "
2132 "Invalid command list address returned! (%lx)\n",
2133 ctlr, complete);
2134 /* not much we can do. */
2135 #ifdef CONFIG_CISS_SCSI_TAPE
2136 return 1;
2137 }
2138
2139 /* We've sent down an abort or reset, but something else
2140 has completed */
2141 if (srl->ncompletions >= (hba[ctlr]->nr_cmds + 2)) {
2142 /* Uh oh. No room to save it for later... */
2143 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
2144 "reject list overflow, command lost!\n", ctlr);
2145 return 1;
2146 }
2147 /* Save it for later */
2148 srl->complete[srl->ncompletions] = complete;
2149 srl->ncompletions++;
2150 #endif
2151 return 0;
2152 }
2153
2154 /*
2155 * Send a command to the controller, and wait for it to complete.
2156 * Only used at init time.
2157 */
2158 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
2159 1: address logical volume log_unit,
2160 2: periph device address is scsi3addr */
2161 unsigned int log_unit,
2162 __u8 page_code, unsigned char *scsi3addr, int cmd_type)
2163 {
2164 CommandList_struct *c;
2165 int i;
2166 unsigned long complete;
2167 ctlr_info_t *info_p = hba[ctlr];
2168 u64bit buff_dma_handle;
2169 int status, done = 0;
2170
2171 if ((c = cmd_alloc(info_p, 1)) == NULL) {
2172 printk(KERN_WARNING "cciss: unable to get memory");
2173 return IO_ERROR;
2174 }
2175 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
2176 log_unit, page_code, scsi3addr, cmd_type);
2177 if (status != IO_OK) {
2178 cmd_free(info_p, c, 1);
2179 return status;
2180 }
2181 resend_cmd1:
2182 /*
2183 * Disable interrupt
2184 */
2185 #ifdef CCISS_DEBUG
2186 printk(KERN_DEBUG "cciss: turning intr off\n");
2187 #endif /* CCISS_DEBUG */
2188 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
2189
2190 /* Make sure there is room in the command FIFO */
2191 /* Actually it should be completely empty at this time */
2192 /* unless we are in here doing error handling for the scsi */
2193 /* tape side of the driver. */
2194 for (i = 200000; i > 0; i--) {
2195 /* if fifo isn't full go */
2196 if (!(info_p->access.fifo_full(info_p))) {
2197
2198 break;
2199 }
2200 udelay(10);
2201 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
2202 " waiting!\n", ctlr);
2203 }
2204 /*
2205 * Send the cmd
2206 */
2207 info_p->access.submit_command(info_p, c);
2208 done = 0;
2209 do {
2210 complete = pollcomplete(ctlr);
2211
2212 #ifdef CCISS_DEBUG
2213 printk(KERN_DEBUG "cciss: command completed\n");
2214 #endif /* CCISS_DEBUG */
2215
2216 if (complete == 1) {
2217 printk(KERN_WARNING
2218 "cciss cciss%d: SendCmd Timeout out, "
2219 "No command list address returned!\n", ctlr);
2220 status = IO_ERROR;
2221 done = 1;
2222 break;
2223 }
2224
2225 /* This will need to change for direct lookup completions */
2226 if ((complete & CISS_ERROR_BIT)
2227 && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
2228 /* if data overrun or underrun on Report command
2229 ignore it
2230 */
2231 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2232 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2233 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2234 ((c->err_info->CommandStatus ==
2235 CMD_DATA_OVERRUN) ||
2236 (c->err_info->CommandStatus == CMD_DATA_UNDERRUN)
2237 )) {
2238 complete = c->busaddr;
2239 } else {
2240 if (c->err_info->CommandStatus ==
2241 CMD_UNSOLICITED_ABORT) {
2242 printk(KERN_WARNING "cciss%d: "
2243 "unsolicited abort %p\n",
2244 ctlr, c);
2245 if (c->retry_count < MAX_CMD_RETRIES) {
2246 printk(KERN_WARNING
2247 "cciss%d: retrying %p\n",
2248 ctlr, c);
2249 c->retry_count++;
2250 /* erase the old error */
2251 /* information */
2252 memset(c->err_info, 0,
2253 sizeof
2254 (ErrorInfo_struct));
2255 goto resend_cmd1;
2256 } else {
2257 printk(KERN_WARNING
2258 "cciss%d: retried %p too "
2259 "many times\n", ctlr, c);
2260 status = IO_ERROR;
2261 goto cleanup1;
2262 }
2263 } else if (c->err_info->CommandStatus ==
2264 CMD_UNABORTABLE) {
2265 printk(KERN_WARNING
2266 "cciss%d: command could not be aborted.\n",
2267 ctlr);
2268 status = IO_ERROR;
2269 goto cleanup1;
2270 }
2271 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2272 " Error %x \n", ctlr,
2273 c->err_info->CommandStatus);
2274 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2275 " offensive info\n"
2276 " size %x\n num %x value %x\n",
2277 ctlr,
2278 c->err_info->MoreErrInfo.Invalid_Cmd.
2279 offense_size,
2280 c->err_info->MoreErrInfo.Invalid_Cmd.
2281 offense_num,
2282 c->err_info->MoreErrInfo.Invalid_Cmd.
2283 offense_value);
2284 status = IO_ERROR;
2285 goto cleanup1;
2286 }
2287 }
2288 /* This will need changing for direct lookup completions */
2289 if (complete != c->busaddr) {
2290 if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
2291 BUG(); /* we are pretty much hosed if we get here. */
2292 }
2293 continue;
2294 } else
2295 done = 1;
2296 } while (!done);
2297
2298 cleanup1:
2299 /* unlock the data buffer from DMA */
2300 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2301 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2302 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
2303 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2304 #ifdef CONFIG_CISS_SCSI_TAPE
2305 /* if we saved some commands for later, process them now. */
2306 if (info_p->scsi_rejects.ncompletions > 0)
2307 do_cciss_intr(0, info_p);
2308 #endif
2309 cmd_free(info_p, c, 1);
2310 return status;
2311 }
2312
2313 /*
2314 * Map (physical) PCI mem into (virtual) kernel space
2315 */
2316 static void __iomem *remap_pci_mem(ulong base, ulong size)
2317 {
2318 ulong page_base = ((ulong) base) & PAGE_MASK;
2319 ulong page_offs = ((ulong) base) - page_base;
2320 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2321
2322 return page_remapped ? (page_remapped + page_offs) : NULL;
2323 }
2324
2325 /*
2326 * Takes jobs off the request queue and sends them to the hardware,
2327 * then puts them on the completion queue to wait for completion.
2328 */
2329 static void start_io(ctlr_info_t *h)
2330 {
2331 CommandList_struct *c;
2332
2333 while ((c = h->reqQ) != NULL) {
2334 /* can't do anything if fifo is full */
2335 if ((h->access.fifo_full(h))) {
2336 printk(KERN_WARNING "cciss: fifo full\n");
2337 break;
2338 }
2339
2340 /* Get the first entry from the Request Q */
2341 removeQ(&(h->reqQ), c);
2342 h->Qdepth--;
2343
2344 /* Tell the controller to execute the command */
2345 h->access.submit_command(h, c);
2346
2347 /* Put job onto the completed Q */
2348 addQ(&(h->cmpQ), c);
2349 }
2350 }
2351
2352 /* Assumes that CCISS_LOCK(h->ctlr) is held. */
2353 /* Zeros out the error record and then resends the command back */
2354 /* to the controller */
2355 static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
2356 {
2357 /* erase the old error information */
2358 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2359
2360 /* add it to software queue and then send it to the controller */
2361 addQ(&(h->reqQ), c);
2362 h->Qdepth++;
2363 if (h->Qdepth > h->maxQsinceinit)
2364 h->maxQsinceinit = h->Qdepth;
2365
2366 start_io(h);
2367 }
2368
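/*
 * A command finished with CMD_TARGET_STATUS; decide whether the request
 * should be failed.  A check condition with no sense data or a recovered
 * error counts as success, and for SG_IO style requests the sense data is
 * copied back into the request.
 */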
2369 static inline int evaluate_target_status(CommandList_struct *cmd)
2370 {
2371 unsigned char sense_key;
2372 int error_count = 1;
2373
2374 if (cmd->err_info->ScsiStatus != 0x02) { /* not check condition? */
2375 if (!blk_pc_request(cmd->rq))
2376 printk(KERN_WARNING "cciss: cmd %p "
2377 "has SCSI Status 0x%x\n",
2378 cmd, cmd->err_info->ScsiStatus);
2379 return error_count;
2380 }
2381
2382 /* check the sense key */
2383 sense_key = 0xf & cmd->err_info->SenseInfo[2];
2384 /* no status or recovered error */
2385 if ((sense_key == 0x0) || (sense_key == 0x1))
2386 error_count = 0;
2387
2388 if (!blk_pc_request(cmd->rq)) { /* Not SG_IO or similar? */
2389 if (error_count != 0)
2390 printk(KERN_WARNING "cciss: cmd %p has CHECK CONDITION"
2391 " sense key = 0x%x\n", cmd, sense_key);
2392 return error_count;
2393 }
2394
2395 /* SG_IO or similar, copy sense data back */
2396 if (cmd->rq->sense) {
2397 if (cmd->rq->sense_len > cmd->err_info->SenseLen)
2398 cmd->rq->sense_len = cmd->err_info->SenseLen;
2399 memcpy(cmd->rq->sense, cmd->err_info->SenseInfo,
2400 cmd->rq->sense_len);
2401 } else
2402 cmd->rq->sense_len = 0;
2403
2404 return error_count;
2405 }
2406
2407 /* checks the status of the job and calls complete buffers to mark all
2408 * buffers for the completed job. Note that this function does not need
2409 * to hold the hba/queue lock.
2410 */
2411 static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
2412 int timeout)
2413 {
2414 int retry_cmd = 0;
2415 struct request *rq = cmd->rq;
2416
2417 rq->errors = 0;
2418
2419 if (timeout)
2420 rq->errors = 1;
2421
2422 if (cmd->err_info->CommandStatus == 0) /* no error has occurred */
2423 goto after_error_processing;
2424
2425 switch (cmd->err_info->CommandStatus) {
2426 case CMD_TARGET_STATUS:
2427 rq->errors = evaluate_target_status(cmd);
2428 break;
2429 case CMD_DATA_UNDERRUN:
2430 if (blk_fs_request(cmd->rq)) {
2431 printk(KERN_WARNING "cciss: cmd %p has"
2432 " completed with data underrun "
2433 "reported\n", cmd);
2434 cmd->rq->data_len = cmd->err_info->ResidualCnt;
2435 }
2436 break;
2437 case CMD_DATA_OVERRUN:
2438 if (blk_fs_request(cmd->rq))
2439 printk(KERN_WARNING "cciss: cmd %p has"
2440 " completed with data overrun "
2441 "reported\n", cmd);
2442 break;
2443 case CMD_INVALID:
2444 printk(KERN_WARNING "cciss: cmd %p is "
2445 "reported invalid\n", cmd);
2446 rq->errors = 1;
2447 break;
2448 case CMD_PROTOCOL_ERR:
2449 printk(KERN_WARNING "cciss: cmd %p has "
2450 "protocol error \n", cmd);
2451 rq->errors = 1;
2452 break;
2453 case CMD_HARDWARE_ERR:
2454 printk(KERN_WARNING "cciss: cmd %p had "
2455 " hardware error\n", cmd);
2456 rq->errors = 1;
2457 break;
2458 case CMD_CONNECTION_LOST:
2459 printk(KERN_WARNING "cciss: cmd %p had "
2460 "connection lost\n", cmd);
2461 rq->errors = 1;
2462 break;
2463 case CMD_ABORTED:
2464 printk(KERN_WARNING "cciss: cmd %p was "
2465 "aborted\n", cmd);
2466 rq->errors = 1;
2467 break;
2468 case CMD_ABORT_FAILED:
2469 printk(KERN_WARNING "cciss: cmd %p reports "
2470 "abort failed\n", cmd);
2471 rq->errors = 1;
2472 break;
2473 case CMD_UNSOLICITED_ABORT:
2474 printk(KERN_WARNING "cciss%d: unsolicited "
2475 "abort %p\n", h->ctlr, cmd);
2476 if (cmd->retry_count < MAX_CMD_RETRIES) {
2477 retry_cmd = 1;
2478 printk(KERN_WARNING
2479 "cciss%d: retrying %p\n", h->ctlr, cmd);
2480 cmd->retry_count++;
2481 } else
2482 printk(KERN_WARNING
2483 "cciss%d: %p retried too "
2484 "many times\n", h->ctlr, cmd);
2485 rq->errors = 1;
2486 break;
2487 case CMD_TIMEOUT:
2488 printk(KERN_WARNING "cciss: cmd %p timedout\n", cmd);
2489 rq->errors = 1;
2490 break;
2491 default:
2492 printk(KERN_WARNING "cciss: cmd %p returned "
2493 "unknown status %x\n", cmd,
2494 cmd->err_info->CommandStatus);
2495 rq->errors = 1;
2496 }
2497
2498 after_error_processing:
2499
2500 /* We need to return this command */
2501 if (retry_cmd) {
2502 resend_cciss_cmd(h, cmd);
2503 return;
2504 }
2505 cmd->rq->data_len = 0;
2506 cmd->rq->completion_data = cmd;
2507 blk_add_trace_rq(cmd->rq->q, cmd->rq, BLK_TA_COMPLETE);
2508 blk_complete_request(cmd->rq);
2509 }
2510
2511 /*
2512 * Get a request and submit it to the controller.
2513 */
2514 static void do_cciss_request(struct request_queue *q)
2515 {
2516 ctlr_info_t *h = q->queuedata;
2517 CommandList_struct *c;
2518 sector_t start_blk;
2519 int seg;
2520 struct request *creq;
2521 u64bit temp64;
2522 struct scatterlist tmp_sg[MAXSGENTRIES];
2523 drive_info_struct *drv;
2524 int i, dir;
2525
2526 /* We call start_io here in case there is a command waiting on the
2527 * queue that has not been sent.
2528 */
2529 if (blk_queue_plugged(q))
2530 goto startio;
2531
2532 queue:
2533 creq = elv_next_request(q);
2534 if (!creq)
2535 goto startio;
2536
2537 BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);
2538
2539 if ((c = cmd_alloc(h, 1)) == NULL)
2540 goto full;
2541
2542 blkdev_dequeue_request(creq);
2543
2544 spin_unlock_irq(q->queue_lock);
2545
2546 c->cmd_type = CMD_RWREQ;
2547 c->rq = creq;
2548
2549 /* fill in the request */
2550 drv = creq->rq_disk->private_data;
2551 c->Header.ReplyQueue = 0; // unused in simple mode
2552 /* got command from pool, so use the command block index instead */
2553 /* for direct lookups. */
2554 /* The first 2 bits are reserved for controller error reporting. */
2555 c->Header.Tag.lower = (c->cmdindex << 3);
2556 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
2557 c->Header.LUN.LogDev.VolId = drv->LunID;
2558 c->Header.LUN.LogDev.Mode = 1;
2559 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
2560 c->Request.Type.Type = TYPE_CMD; // It is a command.
2561 c->Request.Type.Attribute = ATTR_SIMPLE;
2562 c->Request.Type.Direction =
2563 (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
2564 c->Request.Timeout = 0; // Don't time out
2565 c->Request.CDB[0] =
2566 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
2567 start_blk = creq->sector;
2568 #ifdef CCISS_DEBUG
2569 printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n", (int)creq->sector,
2570 (int)creq->nr_sectors);
2571 #endif /* CCISS_DEBUG */
2572
2573 seg = blk_rq_map_sg(q, creq, tmp_sg);
2574
2575 /* get the DMA records for the setup */
2576 if (c->Request.Type.Direction == XFER_READ)
2577 dir = PCI_DMA_FROMDEVICE;
2578 else
2579 dir = PCI_DMA_TODEVICE;
2580
2581 for (i = 0; i < seg; i++) {
2582 c->SG[i].Len = tmp_sg[i].length;
2583 temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page,
2584 tmp_sg[i].offset,
2585 tmp_sg[i].length, dir);
2586 c->SG[i].Addr.lower = temp64.val32.lower;
2587 c->SG[i].Addr.upper = temp64.val32.upper;
2588 c->SG[i].Ext = 0; // we are not chaining
2589 }
2590 /* track how many SG entries we are using */
2591 if (seg > h->maxSG)
2592 h->maxSG = seg;
2593
2594 #ifdef CCISS_DEBUG
2595 printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n",
2596 creq->nr_sectors, seg);
2597 #endif /* CCISS_DEBUG */
2598
2599 c->Header.SGList = c->Header.SGTotal = seg;
2600 if (likely(blk_fs_request(creq))) {
2601 if (h->cciss_read == CCISS_READ_10) {
2602 c->Request.CDB[1] = 0;
2603 c->Request.CDB[2] = (start_blk >> 24) & 0xff; //MSB
2604 c->Request.CDB[3] = (start_blk >> 16) & 0xff;
2605 c->Request.CDB[4] = (start_blk >> 8) & 0xff;
2606 c->Request.CDB[5] = start_blk & 0xff;
2607 c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
2608 c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
2609 c->Request.CDB[8] = creq->nr_sectors & 0xff;
2610 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
2611 } else {
2612 c->Request.CDBLen = 16;
2613 c->Request.CDB[1] = 0;
2614 c->Request.CDB[2] = (start_blk >> 56) & 0xff; //MSB
2615 c->Request.CDB[3] = (start_blk >> 48) & 0xff;
2616 c->Request.CDB[4] = (start_blk >> 40) & 0xff;
2617 c->Request.CDB[5] = (start_blk >> 32) & 0xff;
2618 c->Request.CDB[6] = (start_blk >> 24) & 0xff;
2619 c->Request.CDB[7] = (start_blk >> 16) & 0xff;
2620 c->Request.CDB[8] = (start_blk >> 8) & 0xff;
2621 c->Request.CDB[9] = start_blk & 0xff;
2622 c->Request.CDB[10] = (creq->nr_sectors >> 24) & 0xff;
2623 c->Request.CDB[11] = (creq->nr_sectors >> 16) & 0xff;
2624 c->Request.CDB[12] = (creq->nr_sectors >> 8) & 0xff;
2625 c->Request.CDB[13] = creq->nr_sectors & 0xff;
2626 c->Request.CDB[14] = c->Request.CDB[15] = 0;
2627 }
2628 } else if (blk_pc_request(creq)) {
2629 c->Request.CDBLen = creq->cmd_len;
2630 memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB);
2631 } else {
2632 printk(KERN_WARNING "cciss%d: bad request type %d\n", h->ctlr, creq->cmd_type);
2633 BUG();
2634 }
2635
2636 spin_lock_irq(q->queue_lock);
2637
2638 addQ(&(h->reqQ), c);
2639 h->Qdepth++;
2640 if (h->Qdepth > h->maxQsinceinit)
2641 h->maxQsinceinit = h->Qdepth;
2642
2643 goto queue;
2644 full:
2645 blk_stop_queue(q);
2646 startio:
2647 /* We will already have the driver lock here, so there is no need
2648 * to take it.
2649 */
2650 start_io(h);
2651 }
2652
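/*
 * Fetch the next completed command tag.  When the SCSI tape support is
 * built in, completions that sendcmd() set aside while polling are handed
 * back first, before reading the hardware FIFO.
 */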
2653 static inline unsigned long get_next_completion(ctlr_info_t *h)
2654 {
2655 #ifdef CONFIG_CISS_SCSI_TAPE
2656 /* Any rejects from sendcmd() lying around? Process them first */
2657 if (h->scsi_rejects.ncompletions == 0)
2658 return h->access.command_completed(h);
2659 else {
2660 struct sendcmd_reject_list *srl;
2661 int n;
2662 srl = &h->scsi_rejects;
2663 n = --srl->ncompletions;
2664 /* printk("cciss%d: processing saved reject\n", h->ctlr); */
2665 printk("p");
2666 return srl->complete[n];
2667 }
2668 #else
2669 return h->access.command_completed(h);
2670 #endif
2671 }
2672
2673 static inline int interrupt_pending(ctlr_info_t *h)
2674 {
2675 #ifdef CONFIG_CISS_SCSI_TAPE
2676 return (h->access.intr_pending(h)
2677 || (h->scsi_rejects.ncompletions > 0));
2678 #else
2679 return h->access.intr_pending(h);
2680 #endif
2681 }
2682
2683 static inline long interrupt_not_for_us(ctlr_info_t *h)
2684 {
2685 #ifdef CONFIG_CISS_SCSI_TAPE
2686 return (((h->access.intr_pending(h) == 0) ||
2687 (h->interrupts_enabled == 0))
2688 && (h->scsi_rejects.ncompletions == 0));
2689 #else
2690 return (((h->access.intr_pending(h) == 0) ||
2691 (h->interrupts_enabled == 0)));
2692 #endif
2693 }
2694
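/*
 * Interrupt handler: drain the completion FIFO, match each returned tag
 * to its command (by command-block index for direct-lookup tags, otherwise
 * by walking the completion queue) and complete the request.
 */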
2695 static irqreturn_t do_cciss_intr(int irq, void *dev_id)
2696 {
2697 ctlr_info_t *h = dev_id;
2698 CommandList_struct *c;
2699 unsigned long flags;
2700 __u32 a, a1, a2;
2701
2702 if (interrupt_not_for_us(h))
2703 return IRQ_NONE;
2704 /*
2705 * If there are completed commands in the completion queue,
2706 * we had better do something about it.
2707 */
2708 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2709 while (interrupt_pending(h)) {
2710 while ((a = get_next_completion(h)) != FIFO_EMPTY) {
2711 a1 = a;
2712 if ((a & 0x04)) {
2713 a2 = (a >> 3);
2714 if (a2 >= h->nr_cmds) {
2715 printk(KERN_WARNING
2716 "cciss: controller cciss%d failed, stopping.\n",
2717 h->ctlr);
2718 fail_all_cmds(h->ctlr);
2719 return IRQ_HANDLED;
2720 }
2721
2722 c = h->cmd_pool + a2;
2723 a = c->busaddr;
2724
2725 } else {
2726 a &= ~3;
2727 if ((c = h->cmpQ) == NULL) {
2728 printk(KERN_WARNING
2729 "cciss: Completion of %08x ignored\n",
2730 a1);
2731 continue;
2732 }
2733 while (c->busaddr != a) {
2734 c = c->next;
2735 if (c == h->cmpQ)
2736 break;
2737 }
2738 }
2739 /*
2740 * If we've found the command, take it off the
2741 * completion Q and free it
2742 */
2743 if (c->busaddr == a) {
2744 removeQ(&h->cmpQ, c);
2745 if (c->cmd_type == CMD_RWREQ) {
2746 complete_command(h, c, 0);
2747 } else if (c->cmd_type == CMD_IOCTL_PEND) {
2748 complete(c->waiting);
2749 }
2750 # ifdef CONFIG_CISS_SCSI_TAPE
2751 else if (c->cmd_type == CMD_SCSI)
2752 complete_scsi_command(c, 0, a1);
2753 # endif
2754 continue;
2755 }
2756 }
2757 }
2758
2759 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2760 return IRQ_HANDLED;
2761 }
2762
2763 /*
2764 * We cannot read the structure directly; for portability we must use
2765 * the io functions.
2766 * This is for debug only.
2767 */
2768 #ifdef CCISS_DEBUG
2769 static void print_cfg_table(CfgTable_struct *tb)
2770 {
2771 int i;
2772 char temp_name[17];
2773
2774 printk("Controller Configuration information\n");
2775 printk("------------------------------------\n");
2776 for (i = 0; i < 4; i++)
2777 temp_name[i] = readb(&(tb->Signature[i]));
2778 temp_name[4] = '\0';
2779 printk(" Signature = %s\n", temp_name);
2780 printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
2781 printk(" Transport methods supported = 0x%x\n",
2782 readl(&(tb->TransportSupport)));
2783 printk(" Transport methods active = 0x%x\n",
2784 readl(&(tb->TransportActive)));
2785 printk(" Requested transport Method = 0x%x\n",
2786 readl(&(tb->HostWrite.TransportRequest)));
2787 printk(" Coalesce Interrupt Delay = 0x%x\n",
2788 readl(&(tb->HostWrite.CoalIntDelay)));
2789 printk(" Coalesce Interrupt Count = 0x%x\n",
2790 readl(&(tb->HostWrite.CoalIntCount)));
2791 printk(" Max outstanding commands = 0x%d\n",
2792 readl(&(tb->CmdsOutMax)));
2793 printk(" Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
2794 for (i = 0; i < 16; i++)
2795 temp_name[i] = readb(&(tb->ServerName[i]));
2796 temp_name[16] = '\0';
2797 printk(" Server Name = %s\n", temp_name);
2798 printk(" Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat)));
2799 }
2800 #endif /* CCISS_DEBUG */
2801
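/*
 * Translate the config table BAR offset reported by the controller into
 * a PCI resource index, stepping over I/O, 32-bit and 64-bit memory BARs.
 * Returns -1 if the offset does not correspond to any BAR.
 */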
2802 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
2803 {
2804 int i, offset, mem_type, bar_type;
2805 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
2806 return 0;
2807 offset = 0;
2808 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2809 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
2810 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
2811 offset += 4;
2812 else {
2813 mem_type = pci_resource_flags(pdev, i) &
2814 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
2815 switch (mem_type) {
2816 case PCI_BASE_ADDRESS_MEM_TYPE_32:
2817 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
2818 offset += 4; /* 32 bit */
2819 break;
2820 case PCI_BASE_ADDRESS_MEM_TYPE_64:
2821 offset += 8;
2822 break;
2823 default: /* reserved in PCI 2.2 */
2824 printk(KERN_WARNING
2825 "Base address is invalid\n");
2826 return -1;
2827 break;
2828 }
2829 }
2830 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
2831 return i + 1;
2832 }
2833 return -1;
2834 }
2835
2836 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
2837 * controllers that are capable. If not, we use IO-APIC mode.
2838 */
2839
2840 static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
2841 struct pci_dev *pdev, __u32 board_id)
2842 {
2843 #ifdef CONFIG_PCI_MSI
2844 int err;
2845 struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1},
2846 {0, 2}, {0, 3}
2847 };
2848
2849 /* Some boards advertise MSI but don't really support it */
2850 if ((board_id == 0x40700E11) ||
2851 (board_id == 0x40800E11) ||
2852 (board_id == 0x40820E11) || (board_id == 0x40830E11))
2853 goto default_int_mode;
2854
2855 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
2856 err = pci_enable_msix(pdev, cciss_msix_entries, 4);
2857 if (!err) {
2858 c->intr[0] = cciss_msix_entries[0].vector;
2859 c->intr[1] = cciss_msix_entries[1].vector;
2860 c->intr[2] = cciss_msix_entries[2].vector;
2861 c->intr[3] = cciss_msix_entries[3].vector;
2862 c->msix_vector = 1;
2863 return;
2864 }
2865 if (err > 0) {
2866 printk(KERN_WARNING "cciss: only %d MSI-X vectors "
2867 "available\n", err);
2868 goto default_int_mode;
2869 } else {
2870 printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
2871 err);
2872 goto default_int_mode;
2873 }
2874 }
2875 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
2876 if (!pci_enable_msi(pdev)) {
2877 c->msi_vector = 1;
2878 } else {
2879 printk(KERN_WARNING "cciss: MSI init failed\n");
2880 }
2881 }
2882 default_int_mode:
2883 #endif /* CONFIG_PCI_MSI */
2884 /* if we get here we're going to use the default interrupt mode */
2885 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2886 return;
2887 }
2888
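/*
 * One-time PCI setup for a controller: enable the device, select the
 * interrupt mode, map the registers and configuration table, wait for the
 * firmware to become ready, identify the board and switch the controller
 * into simple transport mode.
 */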
2889 static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2890 {
2891 ushort subsystem_vendor_id, subsystem_device_id, command;
2892 __u32 board_id, scratchpad = 0;
2893 __u64 cfg_offset;
2894 __u32 cfg_base_addr;
2895 __u64 cfg_base_addr_index;
2896 int i, err;
2897
2898 /* check to see if controller has been disabled */
2899 /* BEFORE trying to enable it */
2900 (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
2901 if (!(command & 0x02)) {
2902 printk(KERN_WARNING
2903 "cciss: controller appears to be disabled\n");
2904 return -ENODEV;
2905 }
2906
2907 err = pci_enable_device(pdev);
2908 if (err) {
2909 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
2910 return err;
2911 }
2912
2913 err = pci_request_regions(pdev, "cciss");
2914 if (err) {
2915 printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
2916 "aborting\n");
2917 return err;
2918 }
2919
2920 subsystem_vendor_id = pdev->subsystem_vendor;
2921 subsystem_device_id = pdev->subsystem_device;
2922 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
2923 subsystem_vendor_id);
2924
2925 #ifdef CCISS_DEBUG
2926 printk("command = %x\n", command);
2927 printk("irq = %x\n", pdev->irq);
2928 printk("board_id = %x\n", board_id);
2929 #endif /* CCISS_DEBUG */
2930
2931 /* If the kernel supports MSI/MSI-X we will try to enable that functionality,
2932 * else we use the IO-APIC interrupt assigned to us by system ROM.
2933 */
2934 cciss_interrupt_mode(c, pdev, board_id);
2935
2936 /*
2937 * The memory base address is the first address; the second points to
2938 * the configuration table.
2939 */
2940
2941 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
2942 #ifdef CCISS_DEBUG
2943 printk("address 0 = %x\n", c->paddr);
2944 #endif /* CCISS_DEBUG */
2945 c->vaddr = remap_pci_mem(c->paddr, 0x250);
2946
2947 /* Wait for the board to become ready. (PCI hotplug needs this.)
2948 * We poll for up to 120 secs, once per 100ms. */
2949 for (i = 0; i < 1200; i++) {
2950 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
2951 if (scratchpad == CCISS_FIRMWARE_READY)
2952 break;
2953 set_current_state(TASK_INTERRUPTIBLE);
2954 schedule_timeout(HZ / 10); /* wait 100ms */
2955 }
2956 if (scratchpad != CCISS_FIRMWARE_READY) {
2957 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
2958 err = -ENODEV;
2959 goto err_out_free_res;
2960 }
2961
2962 /* get the address index number */
2963 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
2964 cfg_base_addr &= (__u32) 0x0000ffff;
2965 #ifdef CCISS_DEBUG
2966 printk("cfg base address = %x\n", cfg_base_addr);
2967 #endif /* CCISS_DEBUG */
2968 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
2969 #ifdef CCISS_DEBUG
2970 printk("cfg base address index = %x\n", cfg_base_addr_index);
2971 #endif /* CCISS_DEBUG */
2972 if (cfg_base_addr_index == -1) {
2973 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
2974 err = -ENODEV;
2975 goto err_out_free_res;
2976 }
2977
2978 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
2979 #ifdef CCISS_DEBUG
2980 printk("cfg offset = %x\n", cfg_offset);
2981 #endif /* CCISS_DEBUG */
2982 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
2983 cfg_base_addr_index) +
2984 cfg_offset, sizeof(CfgTable_struct));
2985 c->board_id = board_id;
2986
2987 #ifdef CCISS_DEBUG
2988 print_cfg_table(c->cfgtable);
2989 #endif /* CCISS_DEBUG */
2990
2991 for (i = 0; i < ARRAY_SIZE(products); i++) {
2992 if (board_id == products[i].board_id) {
2993 c->product_name = products[i].product_name;
2994 c->access = *(products[i].access);
2995 c->nr_cmds = products[i].nr_cmds;
2996 break;
2997 }
2998 }
2999 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
3000 (readb(&c->cfgtable->Signature[1]) != 'I') ||
3001 (readb(&c->cfgtable->Signature[2]) != 'S') ||
3002 (readb(&c->cfgtable->Signature[3]) != 'S')) {
3003 printk("Does not appear to be a valid CISS config table\n");
3004 err = -ENODEV;
3005 goto err_out_free_res;
3006 }
3007 /* We didn't find the controller in our list. We know the
3008 * signature is valid. If it's an HP device let's try to
3009 * bind to the device and fire it up. Otherwise we bail.
3010 */
3011 if (i == ARRAY_SIZE(products)) {
3012 if (subsystem_vendor_id == PCI_VENDOR_ID_HP) {
3013 c->product_name = products[i-1].product_name;
3014 c->access = *(products[i-1].access);
3015 c->nr_cmds = products[i-1].nr_cmds;
3016 printk(KERN_WARNING "cciss: This is an unknown "
3017 "Smart Array controller.\n"
3018 "cciss: Please update to the latest driver "
3019 "available from www.hp.com.\n");
3020 } else {
3021 printk(KERN_WARNING "cciss: Sorry, I don't know how"
3022 " to access the Smart Array controller %08lx\n"
3023 , (unsigned long)board_id);
3024 err = -ENODEV;
3025 goto err_out_free_res;
3026 }
3027 }
3028 #ifdef CONFIG_X86
3029 {
3030 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
3031 __u32 prefetch;
3032 prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
3033 prefetch |= 0x100;
3034 writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
3035 }
3036 #endif
3037
3038 /* Disable DMA prefetch for the P600.
3039 * An ASIC bug may result in a prefetch beyond
3040 * physical memory.
3041 */
3042 if (board_id == 0x3225103C) {
3043 __u32 dma_prefetch;
3044 dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG);
3045 dma_prefetch |= 0x8000;
3046 writel(dma_prefetch, c->vaddr + I2O_DMA1_CFG);
3047 }
3048
3049 #ifdef CCISS_DEBUG
3050 printk("Trying to put board into Simple mode\n");
3051 #endif /* CCISS_DEBUG */
3052 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
3053 /* Update the field, and then ring the doorbell */
3054 writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest));
3055 writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
3056
3057 /* under certain very rare conditions, this can take a while.
3058 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
3059 * as we enter this code.) */
3060 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
3061 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
3062 break;
3063 /* delay and try again */
3064 set_current_state(TASK_INTERRUPTIBLE);
3065 schedule_timeout(10);
3066 }
3067
3068 #ifdef CCISS_DEBUG
3069 printk(KERN_DEBUG "I counter got to %d %x\n", i,
3070 readl(c->vaddr + SA5_DOORBELL));
3071 #endif /* CCISS_DEBUG */
3072 #ifdef CCISS_DEBUG
3073 print_cfg_table(c->cfgtable);
3074 #endif /* CCISS_DEBUG */
3075
3076 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3077 printk(KERN_WARNING "cciss: unable to get board into"
3078 " simple mode\n");
3079 err = -ENODEV;
3080 goto err_out_free_res;
3081 }
3082 return 0;
3083
3084 err_out_free_res:
3085 /*
3086 * Deliberately omit pci_disable_device(): it does something nasty to
3087 * Smart Array controllers that pci_enable_device does not undo
3088 */
3089 pci_release_regions(pdev);
3090 return err;
3091 }
3092
3093 /*
3094 * Gets information about the local volumes attached to the controller.
3095 */
3096 static void cciss_getgeometry(int cntl_num)
3097 {
3098 ReportLunData_struct *ld_buff;
3099 InquiryData_struct *inq_buff;
3100 int return_code;
3101 int i;
3102 int listlength = 0;
3103 __u32 lunid = 0;
3104 int block_size;
3105 sector_t total_size;
3106
3107 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
3108 if (ld_buff == NULL) {
3109 printk(KERN_ERR "cciss: out of memory\n");
3110 return;
3111 }
3112 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
3113 if (inq_buff == NULL) {
3114 printk(KERN_ERR "cciss: out of memory\n");
3115 kfree(ld_buff);
3116 return;
3117 }
3118 /* Get the firmware version */
3119 return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
3120 sizeof(InquiryData_struct), 0, 0, 0, NULL,
3121 TYPE_CMD);
3122 if (return_code == IO_OK) {
3123 hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
3124 hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
3125 hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
3126 hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
3127 } else { /* send command failed */
3128
3129 printk(KERN_WARNING "cciss: unable to determine firmware"
3130 " version of controller\n");
3131 }
3132 /* Get the number of logical volumes */
3133 return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
3134 sizeof(ReportLunData_struct), 0, 0, 0, NULL,
3135 TYPE_CMD);
3136
3137 if (return_code == IO_OK) {
3138 #ifdef CCISS_DEBUG
3139 printk("LUN Data\n--------------------------\n");
3140 #endif /* CCISS_DEBUG */
3141
3142 listlength |=
3143 (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
3144 listlength |=
3145 (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
3146 listlength |=
3147 (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
3148 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
3149 } else { /* reading number of logical volumes failed */
3150
3151 printk(KERN_WARNING "cciss: report logical volume"
3152 " command failed\n");
3153 listlength = 0;
3154 }
3155 hba[cntl_num]->num_luns = listlength / 8; // 8 bytes per entry
3156 if (hba[cntl_num]->num_luns > CISS_MAX_LUN) {
3157 printk(KERN_ERR
3158 "ciss: only %d number of logical volumes supported\n",
3159 CISS_MAX_LUN);
3160 hba[cntl_num]->num_luns = CISS_MAX_LUN;
3161 }
3162 #ifdef CCISS_DEBUG
3163 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n",
3164 ld_buff->LUNListLength[0], ld_buff->LUNListLength[1],
3165 ld_buff->LUNListLength[2], ld_buff->LUNListLength[3],
3166 hba[cntl_num]->num_luns);
3167 #endif /* CCISS_DEBUG */
3168
3169 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns - 1;
3170 for (i = 0; i < CISS_MAX_LUN; i++) {
3171 if (i < hba[cntl_num]->num_luns) {
3172 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3]))
3173 << 24;
3174 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2]))
3175 << 16;
3176 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1]))
3177 << 8;
3178 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
3179
3180 hba[cntl_num]->drv[i].LunID = lunid;
3181
3182 #ifdef CCISS_DEBUG
3183 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
3184 ld_buff->LUN[i][0], ld_buff->LUN[i][1],
3185 ld_buff->LUN[i][2], ld_buff->LUN[i][3],
3186 hba[cntl_num]->drv[i].LunID);
3187 #endif /* CCISS_DEBUG */
3188
3189 /* testing to see if 16-byte CDBs are already being used */
3190 if (hba[cntl_num]->cciss_read == CCISS_READ_16) {
3191 cciss_read_capacity_16(cntl_num, i, 0,
3192 &total_size, &block_size);
3193 goto geo_inq;
3194 }
3195 cciss_read_capacity(cntl_num, i, 0, &total_size, &block_size);
3196
3197 /* If read_capacity returns all F's, the logical drive is >2TB */
3198 /* so we switch to 16-byte CDBs for all read/write ops */
3199 if (total_size == 0xFFFFFFFFULL) {
3200 cciss_read_capacity_16(cntl_num, i, 0,
3201 &total_size, &block_size);
3202 hba[cntl_num]->cciss_read = CCISS_READ_16;
3203 hba[cntl_num]->cciss_write = CCISS_WRITE_16;
3204 } else {
3205 hba[cntl_num]->cciss_read = CCISS_READ_10;
3206 hba[cntl_num]->cciss_write = CCISS_WRITE_10;
3207 }
3208 geo_inq:
3209 cciss_geometry_inquiry(cntl_num, i, 0, total_size,
3210 block_size, inq_buff,
3211 &hba[cntl_num]->drv[i]);
3212 } else {
3213 /* initialize raid_level to indicate a free space */
3214 hba[cntl_num]->drv[i].raid_level = -1;
3215 }
3216 }
3217 kfree(ld_buff);
3218 kfree(inq_buff);
3219 }
3220
3221 /* Function to find the first free pointer into our hba[] array */
3222 /* Returns -1 if no free entries are left. */
3223 static int alloc_cciss_hba(void)
3224 {
3225 int i;
3226
3227 for (i = 0; i < MAX_CTLR; i++) {
3228 if (!hba[i]) {
3229 ctlr_info_t *p;
3230
3231 p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
3232 if (!p)
3233 goto Enomem;
3234 p->gendisk[0] = alloc_disk(1 << NWD_SHIFT);
3235 if (!p->gendisk[0]) {
3236 kfree(p);
3237 goto Enomem;
3238 }
3239 hba[i] = p;
3240 return i;
3241 }
3242 }
3243 printk(KERN_WARNING "cciss: This driver supports a maximum"
3244 " of %d controllers.\n", MAX_CTLR);
3245 return -1;
3246 Enomem:
3247 printk(KERN_ERR "cciss: out of memory.\n");
3248 return -1;
3249 }
3250
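/* Free a controller slot allocated by alloc_cciss_hba(), dropping the
 * gendisk references and the per-controller structure. */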
3251 static void free_hba(int i)
3252 {
3253 ctlr_info_t *p = hba[i];
3254 int n;
3255
3256 hba[i] = NULL;
3257 for (n = 0; n < CISS_MAX_LUN; n++)
3258 put_disk(p->gendisk[n]);
3259 kfree(p);
3260 }
3261
3262 /*
3263 * This is it. Find all the controllers and register them. I really hate
3264 * stealing all these major device numbers.
3265 * returns the number of block devices registered.
3266 */
3267 static int __devinit cciss_init_one(struct pci_dev *pdev,
3268 const struct pci_device_id *ent)
3269 {
3270 int i;
3271 int j = 0;
3272 int rc;
3273 int dac;
3274
3275 i = alloc_cciss_hba();
3276 if (i < 0)
3277 return -1;
3278
3279 hba[i]->busy_initializing = 1;
3280
3281 if (cciss_pci_init(hba[i], pdev) != 0)
3282 goto clean1;
3283
3284 sprintf(hba[i]->devname, "cciss%d", i);
3285 hba[i]->ctlr = i;
3286 hba[i]->pdev = pdev;
3287
3288 /* configure PCI DMA stuff */
3289 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
3290 dac = 1;
3291 else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
3292 dac = 0;
3293 else {
3294 printk(KERN_ERR "cciss: no suitable DMA available\n");
3295 goto clean1;
3296 }
3297
3298 /*
3299 * Register with the major number, or get a dynamic major number
3300 * by passing 0 as the argument. A dynamic major allows more than
3301 * 8 controllers to be supported.
3302 */
3303 if (i < MAX_CTLR_ORIG)
3304 hba[i]->major = COMPAQ_CISS_MAJOR + i;
3305 rc = register_blkdev(hba[i]->major, hba[i]->devname);
3306 if (rc == -EBUSY || rc == -EINVAL) {
3307 printk(KERN_ERR
3308 "cciss: Unable to get major number %d for %s "
3309 "on hba %d\n", hba[i]->major, hba[i]->devname, i);
3310 goto clean1;
3311 } else {
3312 if (i >= MAX_CTLR_ORIG)
3313 hba[i]->major = rc;
3314 }
3315
3316 /* make sure the board interrupts are off */
3317 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
3318 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
3319 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
3320 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
3321 hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
3322 goto clean2;
3323 }
3324
3325 printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
3326 hba[i]->devname, pdev->device, pci_name(pdev),
3327 hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
3328
3329 hba[i]->cmd_pool_bits =
3330 kmalloc(((hba[i]->nr_cmds + BITS_PER_LONG -
3331 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
3332 hba[i]->cmd_pool = (CommandList_struct *)
3333 pci_alloc_consistent(hba[i]->pdev,
3334 hba[i]->nr_cmds * sizeof(CommandList_struct),
3335 &(hba[i]->cmd_pool_dhandle));
3336 hba[i]->errinfo_pool = (ErrorInfo_struct *)
3337 pci_alloc_consistent(hba[i]->pdev,
3338 hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3339 &(hba[i]->errinfo_pool_dhandle));
3340 if ((hba[i]->cmd_pool_bits == NULL)
3341 || (hba[i]->cmd_pool == NULL)
3342 || (hba[i]->errinfo_pool == NULL)) {
3343 printk(KERN_ERR "cciss: out of memory");
3344 goto clean4;
3345 }
3346 #ifdef CONFIG_CISS_SCSI_TAPE
3347 hba[i]->scsi_rejects.complete =
3348 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
3349 (hba[i]->nr_cmds + 5), GFP_KERNEL);
3350 if (hba[i]->scsi_rejects.complete == NULL) {
3351 printk(KERN_ERR "cciss: out of memory");
3352 goto clean4;
3353 }
3354 #endif
3355 spin_lock_init(&hba[i]->lock);
3356
3357 /* Initialize the pdev driver private data,
3358 pointing it at hba[i]. */
3359 pci_set_drvdata(pdev, hba[i]);
3360 /* command and error info recs zeroed out before
3361 they are used */
3362 memset(hba[i]->cmd_pool_bits, 0,
3363 ((hba[i]->nr_cmds + BITS_PER_LONG -
3364 1) / BITS_PER_LONG) * sizeof(unsigned long));
3365
3366 #ifdef CCISS_DEBUG
3367 printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n", i);
3368 #endif /* CCISS_DEBUG */
3369
3370 cciss_getgeometry(i);
3371
3372 cciss_scsi_setup(i);
3373
3374 /* Turn the interrupts on so we can service requests */
3375 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
3376
3377 cciss_procinit(i);
3378
3379 hba[i]->cciss_max_sectors = 2048;
3380
3381 hba[i]->busy_initializing = 0;
3382
3383 do {
3384 drive_info_struct *drv = &(hba[i]->drv[j]);
3385 struct gendisk *disk = hba[i]->gendisk[j];
3386 struct request_queue *q;
3387
3388 /* Check if the disk was allocated already */
3389 if (!disk){
3390 hba[i]->gendisk[j] = alloc_disk(1 << NWD_SHIFT);
3391 disk = hba[i]->gendisk[j];
3392 }
3393
3394 /* Check that the disk was able to be allocated */
3395 if (!disk) {
3396 printk(KERN_ERR "cciss: unable to allocate memory for disk %d\n", j);
3397 goto clean4;
3398 }
3399
3400 q = blk_init_queue(do_cciss_request, &hba[i]->lock);
3401 if (!q) {
3402 printk(KERN_ERR
3403 "cciss: unable to allocate queue for disk %d\n",
3404 j);
3405 goto clean4;
3406 }
3407 drv->queue = q;
3408
3409 q->backing_dev_info.ra_pages = READ_AHEAD;
3410 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
3411
3412 /* This is a hardware imposed limit. */
3413 blk_queue_max_hw_segments(q, MAXSGENTRIES);
3414
3415 /* This is a limit in the driver and could be eliminated. */
3416 blk_queue_max_phys_segments(q, MAXSGENTRIES);
3417
3418 blk_queue_max_sectors(q, hba[i]->cciss_max_sectors);
3419
3420 blk_queue_softirq_done(q, cciss_softirq_done);
3421
3422 q->queuedata = hba[i];
3423 sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
3424 disk->major = hba[i]->major;
3425 disk->first_minor = j << NWD_SHIFT;
3426 disk->fops = &cciss_fops;
3427 disk->queue = q;
3428 disk->private_data = drv;
3429 disk->driverfs_dev = &pdev->dev;
3430 /* we must register the controller even if no disks exist */
3431 /* this is for the online array utilities */
3432 if (!drv->heads && j)
3433 continue;
3434 blk_queue_hardsect_size(q, drv->block_size);
3435 set_capacity(disk, drv->nr_blocks);
3436 add_disk(disk);
3437 j++;
3438 } while (j <= hba[i]->highest_lun);
3439
3440 return 1;
3441
3442 clean4:
3443 #ifdef CONFIG_CISS_SCSI_TAPE
3444 kfree(hba[i]->scsi_rejects.complete);
3445 #endif
3446 kfree(hba[i]->cmd_pool_bits);
3447 if (hba[i]->cmd_pool)
3448 pci_free_consistent(hba[i]->pdev,
3449 hba[i]->nr_cmds * sizeof(CommandList_struct),
3450 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3451 if (hba[i]->errinfo_pool)
3452 pci_free_consistent(hba[i]->pdev,
3453 hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3454 hba[i]->errinfo_pool,
3455 hba[i]->errinfo_pool_dhandle);
3456 free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
3457 clean2:
3458 unregister_blkdev(hba[i]->major, hba[i]->devname);
3459 clean1:
3460 hba[i]->busy_initializing = 0;
3461 /* cleanup any queues that may have been initialized */
3462 for (j=0; j <= hba[i]->highest_lun; j++){
3463 drive_info_struct *drv = &(hba[i]->drv[j]);
3464 if (drv->queue)
3465 blk_cleanup_queue(drv->queue);
3466 }
3467 /*
3468 * Deliberately omit pci_disable_device(): it does something nasty to
3469 * Smart Array controllers that pci_enable_device does not undo
3470 */
3471 pci_release_regions(pdev);
3472 pci_set_drvdata(pdev, NULL);
3473 free_hba(i);
3474 return -1;
3475 }
3476
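/*
 * Shutdown handler: flush the controller's battery-backed write cache to
 * disk and release the interrupt so the board is quiet before reboot or
 * power off.
 */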
3477 static void cciss_shutdown(struct pci_dev *pdev)
3478 {
3479 ctlr_info_t *tmp_ptr;
3480 int i;
3481 char flush_buf[4];
3482 int return_code;
3483
3484 tmp_ptr = pci_get_drvdata(pdev);
3485 if (tmp_ptr == NULL)
3486 return;
3487 i = tmp_ptr->ctlr;
3488 if (hba[i] == NULL)
3489 return;
3490
3491 /* Turn board interrupts off and send the flush cache command */
3492 /* sendcmd will turn off interrupts and send the flush,
3493 * writing all data in the battery-backed cache out to disk. */
3494 memset(flush_buf, 0, 4);
3495 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
3496 TYPE_CMD);
3497 if (return_code == IO_OK) {
3498 printk(KERN_INFO "Completed flushing cache on controller %d\n", i);
3499 } else {
3500 printk(KERN_WARNING "Error flushing cache on controller %d\n", i);
3501 }
3502 free_irq(hba[i]->intr[2], hba[i]);
3503 }
3504
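/*
 * PCI remove callback: tear down the gendisks and their request queues,
 * detach from the SCSI subsystem, flush the cache via cciss_shutdown()
 * and release all per-controller resources.
 */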
3505 static void __devexit cciss_remove_one(struct pci_dev *pdev)
3506 {
3507 ctlr_info_t *tmp_ptr;
3508 int i, j;
3509
3510 if (pci_get_drvdata(pdev) == NULL) {
3511 printk(KERN_ERR "cciss: Unable to remove device \n");
3512 return;
3513 }
3514 tmp_ptr = pci_get_drvdata(pdev);
3515 i = tmp_ptr->ctlr;
3516 if (hba[i] == NULL) {
3517 printk(KERN_ERR "cciss: device appears to "
3518 "already be removed \n");
3519 return;
3520 }
3521
3522 remove_proc_entry(hba[i]->devname, proc_cciss);
3523 unregister_blkdev(hba[i]->major, hba[i]->devname);
3524
3525 /* remove it from the disk list */
3526 for (j = 0; j < CISS_MAX_LUN; j++) {
3527 struct gendisk *disk = hba[i]->gendisk[j];
3528 if (disk) {
3529 struct request_queue *q = disk->queue;
3530
3531 if (disk->flags & GENHD_FL_UP)
3532 del_gendisk(disk);
3533 if (q)
3534 blk_cleanup_queue(q);
3535 }
3536 }
3537
3538 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3539
3540 cciss_shutdown(pdev);
3541
3542 #ifdef CONFIG_PCI_MSI
3543 if (hba[i]->msix_vector)
3544 pci_disable_msix(hba[i]->pdev);
3545 else if (hba[i]->msi_vector)
3546 pci_disable_msi(hba[i]->pdev);
3547 #endif /* CONFIG_PCI_MSI */
3548
3549 iounmap(hba[i]->vaddr);
3550
3551 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct),
3552 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3553 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3554 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3555 kfree(hba[i]->cmd_pool_bits);
3556 #ifdef CONFIG_CISS_SCSI_TAPE
3557 kfree(hba[i]->scsi_rejects.complete);
3558 #endif
3559 /*
3560 * Deliberately omit pci_disable_device(): it does something nasty to
3561 * Smart Array controllers that pci_enable_device does not undo
3562 */
3563 pci_release_regions(pdev);
3564 pci_set_drvdata(pdev, NULL);
3565 free_hba(i);
3566 }
3567
3568 static struct pci_driver cciss_pci_driver = {
3569 .name = "cciss",
3570 .probe = cciss_init_one,
3571 .remove = __devexit_p(cciss_remove_one),
3572 .id_table = cciss_pci_device_id, /* id_table */
3573 .shutdown = cciss_shutdown,
3574 };
3575
3576 /*
3577 * This is it. Register the PCI driver information for the cards we control;
3578 * the OS will call our registered routines when it finds one of our cards.
3579 */
3580 static int __init cciss_init(void)
3581 {
3582 printk(KERN_INFO DRIVER_NAME "\n");
3583
3584 /* Register for our PCI devices */
3585 return pci_register_driver(&cciss_pci_driver);
3586 }
3587
3588 static void __exit cciss_cleanup(void)
3589 {
3590 int i;
3591
3592 pci_unregister_driver(&cciss_pci_driver);
3593 /* double check that all controller entries have been removed */
3594 for (i = 0; i < MAX_CTLR; i++) {
3595 if (hba[i] != NULL) {
3596 printk(KERN_WARNING "cciss: had to remove"
3597 " controller %d\n", i);
3598 cciss_remove_one(hba[i]->pdev);
3599 }
3600 }
3601 remove_proc_entry("cciss", proc_root_driver);
3602 }
3603
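/*
 * Called when the controller stops responding: mark it dead, disable the
 * PCI device and complete every outstanding command with a hardware error.
 */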
3604 static void fail_all_cmds(unsigned long ctlr)
3605 {
3606 /* If we get here, the board is apparently dead. */
3607 ctlr_info_t *h = hba[ctlr];
3608 CommandList_struct *c;
3609 unsigned long flags;
3610
3611 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
3612 h->alive = 0; /* the controller apparently died... */
3613
3614 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
3615
3616 pci_disable_device(h->pdev); /* Make sure it is really dead. */
3617
3618 /* move everything off the request queue onto the completed queue */
3619 while ((c = h->reqQ) != NULL) {
3620 removeQ(&(h->reqQ), c);
3621 h->Qdepth--;
3622 addQ(&(h->cmpQ), c);
3623 }
3624
3625 /* Now, fail everything on the completed queue with a HW error */
3626 while ((c = h->cmpQ) != NULL) {
3627 removeQ(&h->cmpQ, c);
3628 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
3629 if (c->cmd_type == CMD_RWREQ) {
3630 complete_command(h, c, 0);
3631 } else if (c->cmd_type == CMD_IOCTL_PEND)
3632 complete(c->waiting);
3633 #ifdef CONFIG_CISS_SCSI_TAPE
3634 else if (c->cmd_type == CMD_SCSI)
3635 complete_scsi_command(c, 0, 0);
3636 #endif
3637 }
3638 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
3639 return;
3640 }
3641
3642 module_init(cciss_init);
3643 module_exit(cciss_cleanup);