[PATCH] cciss: disable DMA prefetch on P600
drivers/block/cciss.c
1/*
2 * Disk Array driver for HP SA 5xxx and 6xxx Controllers
3 * Copyright 2000, 2006 Hewlett-Packard Development Company, L.P.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
20 *
21 */
22
23#include <linux/module.h>
24#include <linux/interrupt.h>
25#include <linux/types.h>
26#include <linux/pci.h>
27#include <linux/kernel.h>
28#include <linux/slab.h>
29#include <linux/delay.h>
30#include <linux/major.h>
31#include <linux/fs.h>
32#include <linux/bio.h>
33#include <linux/blkpg.h>
34#include <linux/timer.h>
35#include <linux/proc_fs.h>
36#include <linux/init.h>
37#include <linux/hdreg.h>
38#include <linux/spinlock.h>
39#include <linux/compat.h>
40#include <linux/blktrace_api.h>
41#include <asm/uaccess.h>
42#include <asm/io.h>
43
44#include <linux/dma-mapping.h>
45#include <linux/blkdev.h>
46#include <linux/genhd.h>
47#include <linux/completion.h>
48
49#define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
50#define DRIVER_NAME "HP CISS Driver (v 3.6.14)"
51#define DRIVER_VERSION CCISS_DRIVER_VERSION(3,6,14)
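/*
 * Worked example (editorial note, not in the original source):
 * CCISS_DRIVER_VERSION(3,6,14) packs to (3 << 16) | (6 << 8) | 14 =
 * 0x03060E; this is the value handed back by the CCISS_GETDRIVVER
 * ioctl below.
 */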
52
53/* Embedded module documentation macros - see modules.h */
54MODULE_AUTHOR("Hewlett-Packard Company");
55MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 3.6.14");
56MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
57 " SA6i P600 P800 P400 P400i E200 E200i E500");
58MODULE_VERSION("3.6.14");
59MODULE_LICENSE("GPL");
60
61#include "cciss_cmd.h"
62#include "cciss.h"
63#include <linux/cciss_ioctl.h>
64
65/* define the PCI info for the cards we can control */
66static const struct pci_device_id cciss_pci_device_id[] = {
67 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070},
68 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
69 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
70 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083},
71 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091},
72 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A},
73 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B},
74 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C},
75 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D},
76 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225},
77 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
78 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
79 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235},
80 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211},
81 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212},
82 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213},
83 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214},
84 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
85 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3237},
86 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
87 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
88 {0,}
89};
90
91MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
92
93/* board_id = Subsystem Device ID & Vendor ID
94 * product = Marketing Name for the board
95 * access = Address of the struct of function pointers
96 * nr_cmds = Number of commands supported by controller
97 */
98static struct board_type products[] = {
99 {0x40700E11, "Smart Array 5300", &SA5_access, 512},
100 {0x40800E11, "Smart Array 5i", &SA5B_access, 512},
101 {0x40820E11, "Smart Array 532", &SA5B_access, 512},
102 {0x40830E11, "Smart Array 5312", &SA5B_access, 512},
103 {0x409A0E11, "Smart Array 641", &SA5_access, 512},
104 {0x409B0E11, "Smart Array 642", &SA5_access, 512},
105 {0x409C0E11, "Smart Array 6400", &SA5_access, 512},
106 {0x409D0E11, "Smart Array 6400 EM", &SA5_access, 512},
107 {0x40910E11, "Smart Array 6i", &SA5_access, 512},
108 {0x3225103C, "Smart Array P600", &SA5_access, 512},
109 {0x3223103C, "Smart Array P800", &SA5_access, 512},
110 {0x3234103C, "Smart Array P400", &SA5_access, 512},
111 {0x3235103C, "Smart Array P400i", &SA5_access, 512},
112 {0x3211103C, "Smart Array E200i", &SA5_access, 120},
113 {0x3212103C, "Smart Array E200", &SA5_access, 120},
114 {0x3213103C, "Smart Array E200i", &SA5_access, 120},
115 {0x3214103C, "Smart Array E200i", &SA5_access, 120},
116 {0x3215103C, "Smart Array E200i", &SA5_access, 120},
117 {0x3237103C, "Smart Array E500", &SA5_access, 512},
118 {0xFFFF103C, "Unknown Smart Array", &SA5_access, 120},
119};
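/*
 * Editorial note (not in the original source): a board_id such as
 * 0x3225103C is the PCI subsystem device ID (0x3225, the P600) in the
 * upper 16 bits and the subsystem vendor ID (0x103C, HP) in the lower
 * 16 bits, i.e. it corresponds to the {PCI_VENDOR_ID_HP,
 * PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225} row of cciss_pci_device_id[]
 * above.
 */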
120
121/* How long to wait (in milliseconds) for board to go into simple mode */
122#define MAX_CONFIG_WAIT 30000
123#define MAX_IOCTL_CONFIG_WAIT 1000
124
125/* define how many times we will try a command because of bus resets */
126#define MAX_CMD_RETRIES 3
127
128#define READ_AHEAD 1024
129#define MAX_CTLR 32
130
131/* Originally the cciss driver supported only 8 major numbers */
132#define MAX_CTLR_ORIG 8
133
134static ctlr_info_t *hba[MAX_CTLR];
135
136static void do_cciss_request(request_queue_t *q);
137static irqreturn_t do_cciss_intr(int irq, void *dev_id);
138static int cciss_open(struct inode *inode, struct file *filep);
139static int cciss_release(struct inode *inode, struct file *filep);
140static int cciss_ioctl(struct inode *inode, struct file *filep,
141 unsigned int cmd, unsigned long arg);
142static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
143
144static int revalidate_allvol(ctlr_info_t *host);
145static int cciss_revalidate(struct gendisk *disk);
146static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
147static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
148 int clear_all);
149
150static void cciss_read_capacity(int ctlr, int logvol, int withirq,
151 sector_t *total_size, unsigned int *block_size);
152static void cciss_read_capacity_16(int ctlr, int logvol, int withirq,
153 sector_t *total_size, unsigned int *block_size);
154static void cciss_geometry_inquiry(int ctlr, int logvol,
155 int withirq, sector_t total_size,
156 unsigned int block_size, InquiryData_struct *inq_buff,
157 drive_info_struct *drv);
158static void cciss_getgeometry(int cntl_num);
159static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
160 __u32);
161static void start_io(ctlr_info_t *h);
162static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
163 unsigned int use_unit_num, unsigned int log_unit,
164 __u8 page_code, unsigned char *scsi3addr, int cmd_type);
165static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
166 unsigned int use_unit_num, unsigned int log_unit,
167 __u8 page_code, int cmd_type);
168
169static void fail_all_cmds(unsigned long ctlr);
170
171#ifdef CONFIG_PROC_FS
172static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
173 int length, int *eof, void *data);
174static void cciss_procinit(int i);
175#else
176static void cciss_procinit(int i)
177{
178}
179#endif /* CONFIG_PROC_FS */
180
181#ifdef CONFIG_COMPAT
182static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
183#endif
184
185static struct block_device_operations cciss_fops = {
186 .owner = THIS_MODULE,
187 .open = cciss_open,
188 .release = cciss_release,
189 .ioctl = cciss_ioctl,
190 .getgeo = cciss_getgeo,
191#ifdef CONFIG_COMPAT
192 .compat_ioctl = cciss_compat_ioctl,
193#endif
194 .revalidate_disk = cciss_revalidate,
195};
196
197/*
198 * Enqueuing and dequeuing functions for cmdlists.
199 */
200static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
201{
202 if (*Qptr == NULL) {
203 *Qptr = c;
204 c->next = c->prev = c;
205 } else {
206 c->prev = (*Qptr)->prev;
207 c->next = (*Qptr);
208 (*Qptr)->prev->next = c;
209 (*Qptr)->prev = c;
210 }
211}
212
213static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
214 CommandList_struct *c)
215{
216 if (c && c->next != c) {
217 if (*Qptr == c)
218 *Qptr = c->next;
219 c->prev->next = c->next;
220 c->next->prev = c->prev;
221 } else {
222 *Qptr = NULL;
223 }
224 return c;
225}
226
227#include "cciss_scsi.c" /* For SCSI tape support */
228
229#ifdef CONFIG_PROC_FS
230
231/*
232 * Report information about this controller.
233 */
234#define ENG_GIG 1000000000
235#define ENG_GIG_FACTOR (ENG_GIG/512)
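/* i.e. 1000000000 / 512 = 1953125 sectors per decimal gigabyte; the sizes
 * printed below are nr_blocks / ENG_GIG_FACTOR, with two decimal places
 * taken from the remainder (editorial note).
 */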
236#define RAID_UNKNOWN 6
237static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
238 "UNKNOWN"
239};
240
241static struct proc_dir_entry *proc_cciss;
242
243static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
244 int length, int *eof, void *data)
245{
246 off_t pos = 0;
247 off_t len = 0;
248 int size, i, ctlr;
249 ctlr_info_t *h = (ctlr_info_t *) data;
250 drive_info_struct *drv;
251 unsigned long flags;
252 sector_t vol_sz, vol_sz_frac;
253
254 ctlr = h->ctlr;
255
256 /* prevent displaying bogus info during configuration
257 * or deconfiguration of a logical volume
258 */
259 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
260 if (h->busy_configuring) {
261 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
262 return -EBUSY;
263 }
264 h->busy_configuring = 1;
265 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
266
267 size = sprintf(buffer, "%s: HP %s Controller\n"
268 "Board ID: 0x%08lx\n"
269 "Firmware Version: %c%c%c%c\n"
270 "IRQ: %d\n"
271 "Logical drives: %d\n"
272 "Current Q depth: %d\n"
273 "Current # commands on controller: %d\n"
274 "Max Q depth since init: %d\n"
275 "Max # commands on controller since init: %d\n"
276 "Max SG entries since init: %d\n\n",
277 h->devname,
278 h->product_name,
279 (unsigned long)h->board_id,
280 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
281 h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
282 h->num_luns, h->Qdepth, h->commands_outstanding,
283 h->maxQsinceinit, h->max_outstanding, h->maxSG);
284
285 pos += size;
286 len += size;
287 cciss_proc_tape_report(ctlr, buffer, &pos, &len);
288 for (i = 0; i <= h->highest_lun; i++) {
289
290 drv = &h->drv[i];
291 if (drv->heads == 0)
292 continue;
293
294 vol_sz = drv->nr_blocks;
295 vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
296 vol_sz_frac *= 100;
297 sector_div(vol_sz_frac, ENG_GIG_FACTOR);
298
299 if (drv->raid_level > 5)
300 drv->raid_level = RAID_UNKNOWN;
301 size = sprintf(buffer + len, "cciss/c%dd%d:"
302 "\t%4u.%02uGB\tRAID %s\n",
303 ctlr, i, (int)vol_sz, (int)vol_sz_frac,
304 raid_label[drv->raid_level]);
305 pos += size;
306 len += size;
307 }
308
309 *eof = 1;
310 *start = buffer + offset;
311 len -= offset;
312 if (len > length)
313 len = length;
314 h->busy_configuring = 0;
315 return len;
316}
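/*
 * Illustrative output only (editorial note, values made up): the entry
 * created below, /proc/driver/cciss/cciss<n>, shows the controller summary
 * followed by one line per logical volume, e.g.
 *
 *	cciss/c0d0:	  68.33GB	RAID 5
 *
 * and "engage scsi" is the only write cciss_proc_write() accepts (and only
 * when CONFIG_CISS_SCSI_TAPE is enabled).
 */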
317
318static int
319cciss_proc_write(struct file *file, const char __user *buffer,
320 unsigned long count, void *data)
321{
322 unsigned char cmd[80];
323 int len;
324#ifdef CONFIG_CISS_SCSI_TAPE
325 ctlr_info_t *h = (ctlr_info_t *) data;
326 int rc;
327#endif
328
329 if (count > sizeof(cmd) - 1)
330 return -EINVAL;
331 if (copy_from_user(cmd, buffer, count))
332 return -EFAULT;
333 cmd[count] = '\0';
334 len = strlen(cmd); // above 3 lines ensure safety
335 if (len && cmd[len - 1] == '\n')
336 cmd[--len] = '\0';
337# ifdef CONFIG_CISS_SCSI_TAPE
338 if (strcmp("engage scsi", cmd) == 0) {
339 rc = cciss_engage_scsi(h->ctlr);
340 if (rc != 0)
341 return -rc;
342 return count;
343 }
344 /* might be nice to have "disengage" too, but it's not
345 safely possible. (only 1 module use count, lock issues.) */
346# endif
347 return -EINVAL;
348}
349
350/*
351 * Get us a file in /proc/cciss that says something about each controller.
352 * Create /proc/cciss if it doesn't exist yet.
353 */
354static void __devinit cciss_procinit(int i)
355{
356 struct proc_dir_entry *pde;
357
358 if (proc_cciss == NULL) {
359 proc_cciss = proc_mkdir("cciss", proc_root_driver);
360 if (!proc_cciss)
361 return;
362 }
363
364 pde = create_proc_read_entry(hba[i]->devname,
365 S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
366 proc_cciss, cciss_proc_get_info, hba[i]);
367 pde->write_proc = cciss_proc_write;
368}
369#endif /* CONFIG_PROC_FS */
370
371/*
372 * For operations that cannot sleep, a command block is allocated at init,
373 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
374 * which ones are free or in use. For operations that can wait for kmalloc
375 * to possibly sleep, this routine can be called with get_from_pool set to 0.
376 * cmd_free() MUST then be called with got_from_pool set to 0 as well.
377 */
378static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
379{
380 CommandList_struct *c;
381 int i;
382 u64bit temp64;
383 dma_addr_t cmd_dma_handle, err_dma_handle;
384
385 if (!get_from_pool) {
386 c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
387 sizeof(CommandList_struct), &cmd_dma_handle);
388 if (c == NULL)
389 return NULL;
390 memset(c, 0, sizeof(CommandList_struct));
391
392 c->cmdindex = -1;
393
394 c->err_info = (ErrorInfo_struct *)
395 pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
396 &err_dma_handle);
397
398 if (c->err_info == NULL) {
399 pci_free_consistent(h->pdev,
400 sizeof(CommandList_struct), c, cmd_dma_handle);
401 return NULL;
402 }
403 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
404 } else { /* get it out of the controller's pool */
405
406 do {
407 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
408 if (i == h->nr_cmds)
409 return NULL;
410 } while (test_and_set_bit
411 (i & (BITS_PER_LONG - 1),
412 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
413#ifdef CCISS_DEBUG
414 printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
415#endif
416 c = h->cmd_pool + i;
417 memset(c, 0, sizeof(CommandList_struct));
418 cmd_dma_handle = h->cmd_pool_dhandle
419 + i * sizeof(CommandList_struct);
420 c->err_info = h->errinfo_pool + i;
421 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
422 err_dma_handle = h->errinfo_pool_dhandle
423 + i * sizeof(ErrorInfo_struct);
424 h->nr_allocs++;
425
426 c->cmdindex = i;
427 }
428
429 c->busaddr = (__u32) cmd_dma_handle;
430 temp64.val = (__u64) err_dma_handle;
431 c->ErrDesc.Addr.lower = temp64.val32.lower;
432 c->ErrDesc.Addr.upper = temp64.val32.upper;
433 c->ErrDesc.Len = sizeof(ErrorInfo_struct);
434
435 c->ctlr = h->ctlr;
436 return c;
437}
438
439/*
440 * Frees a command block that was previously allocated with cmd_alloc().
441 */
442static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
443{
444 int i;
445 u64bit temp64;
446
447 if (!got_from_pool) {
448 temp64.val32.lower = c->ErrDesc.Addr.lower;
449 temp64.val32.upper = c->ErrDesc.Addr.upper;
450 pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
451 c->err_info, (dma_addr_t) temp64.val);
452 pci_free_consistent(h->pdev, sizeof(CommandList_struct),
453 c, (dma_addr_t) c->busaddr);
454 } else {
455 i = c - h->cmd_pool;
456 clear_bit(i & (BITS_PER_LONG - 1),
457 h->cmd_pool_bits + (i / BITS_PER_LONG));
458 h->nr_frees++;
459 }
460}
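/*
 * Pairing sketch (editorial): the two allocation paths must be matched on
 * free, as the ioctl and request/interrupt paths below do:
 *
 *	c = cmd_alloc(h, 0); ... cmd_free(h, c, 0);	(pci_alloc_consistent)
 *	c = cmd_alloc(h, 1); ... cmd_free(h, c, 1);	(pre-allocated pool)
 */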
461
462static inline ctlr_info_t *get_host(struct gendisk *disk)
463{
464 return disk->queue->queuedata;
465}
466
467static inline drive_info_struct *get_drv(struct gendisk *disk)
468{
469 return disk->private_data;
470}
471
472/*
473 * Open. Make sure the device is really there.
474 */
475static int cciss_open(struct inode *inode, struct file *filep)
476{
477 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
478 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
479
480#ifdef CCISS_DEBUG
481 printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
482#endif /* CCISS_DEBUG */
483
484 if (host->busy_initializing || drv->busy_configuring)
485 return -EBUSY;
486 /*
487 * Root is allowed to open raw volume zero even if it's not configured
488 * so array config can still work. Root is also allowed to open any
489 * volume that has a LUN ID, so it can issue IOCTL to reread the
490 * disk information. I don't think I really like this,
491 * but I'm already using way too many device nodes to claim another one
492 * for "raw controller".
493 */
494 if (drv->nr_blocks == 0) {
495 if (iminor(inode) != 0) { /* not node 0? */
496 /* if not node 0, make sure it is partition 0 */
497 if (iminor(inode) & 0x0f) {
498 return -ENXIO;
499 /* if it is, make sure we have a LUN ID */
500 } else if (drv->LunID == 0) {
501 return -ENXIO;
502 }
503 }
504 if (!capable(CAP_SYS_ADMIN))
505 return -EPERM;
506 }
507 drv->usage_count++;
508 host->usage_count++;
509 return 0;
510}
511
512/*
513 * Close. Sync first.
514 */
515static int cciss_release(struct inode *inode, struct file *filep)
516{
517 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
518 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
519
520#ifdef CCISS_DEBUG
521 printk(KERN_DEBUG "cciss_release %s\n",
522 inode->i_bdev->bd_disk->disk_name);
523#endif /* CCISS_DEBUG */
524
525 drv->usage_count--;
526 host->usage_count--;
527 return 0;
528}
529
530#ifdef CONFIG_COMPAT
531
532static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
533{
534 int ret;
535 lock_kernel();
536 ret = cciss_ioctl(f->f_dentry->d_inode, f, cmd, arg);
537 unlock_kernel();
538 return ret;
539}
540
541static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
542 unsigned long arg);
543static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd,
544 unsigned long arg);
545
546static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
547{
548 switch (cmd) {
549 case CCISS_GETPCIINFO:
550 case CCISS_GETINTINFO:
551 case CCISS_SETINTINFO:
552 case CCISS_GETNODENAME:
553 case CCISS_SETNODENAME:
554 case CCISS_GETHEARTBEAT:
555 case CCISS_GETBUSTYPES:
556 case CCISS_GETFIRMVER:
557 case CCISS_GETDRIVVER:
558 case CCISS_REVALIDVOLS:
559 case CCISS_DEREGDISK:
560 case CCISS_REGNEWDISK:
561 case CCISS_REGNEWD:
562 case CCISS_RESCANDISK:
563 case CCISS_GETLUNINFO:
564 return do_ioctl(f, cmd, arg);
565
566 case CCISS_PASSTHRU32:
567 return cciss_ioctl32_passthru(f, cmd, arg);
568 case CCISS_BIG_PASSTHRU32:
569 return cciss_ioctl32_big_passthru(f, cmd, arg);
570
571 default:
572 return -ENOIOCTLCMD;
573 }
574}
575
576static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
577 unsigned long arg)
578{
579 IOCTL32_Command_struct __user *arg32 =
580 (IOCTL32_Command_struct __user *) arg;
581 IOCTL_Command_struct arg64;
582 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
583 int err;
584 u32 cp;
585
586 err = 0;
587 err |=
588 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
589 sizeof(arg64.LUN_info));
590 err |=
591 copy_from_user(&arg64.Request, &arg32->Request,
592 sizeof(arg64.Request));
593 err |=
594 copy_from_user(&arg64.error_info, &arg32->error_info,
595 sizeof(arg64.error_info));
596 err |= get_user(arg64.buf_size, &arg32->buf_size);
597 err |= get_user(cp, &arg32->buf);
598 arg64.buf = compat_ptr(cp);
599 err |= copy_to_user(p, &arg64, sizeof(arg64));
600
601 if (err)
602 return -EFAULT;
603
604 err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long)p);
605 if (err)
606 return err;
607 err |=
608 copy_in_user(&arg32->error_info, &p->error_info,
609 sizeof(arg32->error_info));
610 if (err)
611 return -EFAULT;
612 return err;
613}
614
615static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd,
616 unsigned long arg)
617{
618 BIG_IOCTL32_Command_struct __user *arg32 =
619 (BIG_IOCTL32_Command_struct __user *) arg;
620 BIG_IOCTL_Command_struct arg64;
621 BIG_IOCTL_Command_struct __user *p =
622 compat_alloc_user_space(sizeof(arg64));
623 int err;
624 u32 cp;
625
626 err = 0;
627 err |=
628 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
629 sizeof(arg64.LUN_info));
630 err |=
631 copy_from_user(&arg64.Request, &arg32->Request,
632 sizeof(arg64.Request));
633 err |=
634 copy_from_user(&arg64.error_info, &arg32->error_info,
635 sizeof(arg64.error_info));
636 err |= get_user(arg64.buf_size, &arg32->buf_size);
637 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
638 err |= get_user(cp, &arg32->buf);
639 arg64.buf = compat_ptr(cp);
640 err |= copy_to_user(p, &arg64, sizeof(arg64));
641
642 if (err)
643 return -EFAULT;
644
645 err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long)p);
646 if (err)
647 return err;
648 err |=
649 copy_in_user(&arg32->error_info, &p->error_info,
650 sizeof(arg32->error_info));
651 if (err)
652 return -EFAULT;
653 return err;
654}
655#endif
656
657static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
658{
659 drive_info_struct *drv = get_drv(bdev->bd_disk);
660
661 if (!drv->cylinders)
662 return -ENXIO;
663
664 geo->heads = drv->heads;
665 geo->sectors = drv->sectors;
666 geo->cylinders = drv->cylinders;
667 return 0;
668}
669
670/*
671 * ioctl
672 */
673static int cciss_ioctl(struct inode *inode, struct file *filep,
674 unsigned int cmd, unsigned long arg)
675{
676 struct block_device *bdev = inode->i_bdev;
677 struct gendisk *disk = bdev->bd_disk;
678 ctlr_info_t *host = get_host(disk);
679 drive_info_struct *drv = get_drv(disk);
680 int ctlr = host->ctlr;
681 void __user *argp = (void __user *)arg;
682
683#ifdef CCISS_DEBUG
684 printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
685#endif /* CCISS_DEBUG */
686
687 switch (cmd) {
688 case CCISS_GETPCIINFO:
689 {
690 cciss_pci_info_struct pciinfo;
691
692 if (!arg)
693 return -EINVAL;
694 pciinfo.domain = pci_domain_nr(host->pdev->bus);
695 pciinfo.bus = host->pdev->bus->number;
696 pciinfo.dev_fn = host->pdev->devfn;
697 pciinfo.board_id = host->board_id;
698 if (copy_to_user
699 (argp, &pciinfo, sizeof(cciss_pci_info_struct)))
700 return -EFAULT;
701 return 0;
702 }
703 case CCISS_GETINTINFO:
704 {
705 cciss_coalint_struct intinfo;
706 if (!arg)
707 return -EINVAL;
708 intinfo.delay =
709 readl(&host->cfgtable->HostWrite.CoalIntDelay);
710 intinfo.count =
711 readl(&host->cfgtable->HostWrite.CoalIntCount);
712 if (copy_to_user
713 (argp, &intinfo, sizeof(cciss_coalint_struct)))
714 return -EFAULT;
715 return 0;
716 }
717 case CCISS_SETINTINFO:
718 {
719 cciss_coalint_struct intinfo;
720 unsigned long flags;
721 int i;
722
723 if (!arg)
724 return -EINVAL;
725 if (!capable(CAP_SYS_ADMIN))
726 return -EPERM;
727 if (copy_from_user
728 (&intinfo, argp, sizeof(cciss_coalint_struct)))
729 return -EFAULT;
730 if ((intinfo.delay == 0) && (intinfo.count == 0))
731 {
732// printk("cciss_ioctl: delay and count cannot be 0\n");
733 return -EINVAL;
734 }
735 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
736 /* Update the field, and then ring the doorbell */
737 writel(intinfo.delay,
738 &(host->cfgtable->HostWrite.CoalIntDelay));
739 writel(intinfo.count,
740 &(host->cfgtable->HostWrite.CoalIntCount));
741 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
742
743 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
744 if (!(readl(host->vaddr + SA5_DOORBELL)
745 & CFGTBL_ChangeReq))
746 break;
747 /* delay and try again */
748 udelay(1000);
749 }
750 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
751 if (i >= MAX_IOCTL_CONFIG_WAIT)
752 return -EAGAIN;
753 return 0;
754 }
755 case CCISS_GETNODENAME:
756 {
757 NodeName_type NodeName;
758 int i;
759
760 if (!arg)
761 return -EINVAL;
762 for (i = 0; i < 16; i++)
763 NodeName[i] =
764 readb(&host->cfgtable->ServerName[i]);
765 if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
766 return -EFAULT;
767 return 0;
768 }
769 case CCISS_SETNODENAME:
770 {
771 NodeName_type NodeName;
772 unsigned long flags;
773 int i;
774
775 if (!arg)
776 return -EINVAL;
777 if (!capable(CAP_SYS_ADMIN))
778 return -EPERM;
779
780 if (copy_from_user
781 (NodeName, argp, sizeof(NodeName_type)))
782 return -EFAULT;
783
784 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
785
786 /* Update the field, and then ring the doorbell */
787 for (i = 0; i < 16; i++)
788 writeb(NodeName[i],
789 &host->cfgtable->ServerName[i]);
790
791 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
792
793 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
794 if (!(readl(host->vaddr + SA5_DOORBELL)
795 & CFGTBL_ChangeReq))
796 break;
797 /* delay and try again */
798 udelay(1000);
799 }
800 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
801 if (i >= MAX_IOCTL_CONFIG_WAIT)
802 return -EAGAIN;
803 return 0;
804 }
805
806 case CCISS_GETHEARTBEAT:
807 {
808 Heartbeat_type heartbeat;
809
810 if (!arg)
811 return -EINVAL;
812 heartbeat = readl(&host->cfgtable->HeartBeat);
813 if (copy_to_user
814 (argp, &heartbeat, sizeof(Heartbeat_type)))
815 return -EFAULT;
816 return 0;
817 }
818 case CCISS_GETBUSTYPES:
819 {
820 BusTypes_type BusTypes;
821
822 if (!arg)
823 return -EINVAL;
824 BusTypes = readl(&host->cfgtable->BusTypes);
825 if (copy_to_user
826 (argp, &BusTypes, sizeof(BusTypes_type)))
827 return -EFAULT;
828 return 0;
829 }
830 case CCISS_GETFIRMVER:
831 {
832 FirmwareVer_type firmware;
833
834 if (!arg)
835 return -EINVAL;
836 memcpy(firmware, host->firm_ver, 4);
837
838 if (copy_to_user
839 (argp, firmware, sizeof(FirmwareVer_type)))
840 return -EFAULT;
841 return 0;
842 }
843 case CCISS_GETDRIVVER:
844 {
845 DriverVer_type DriverVer = DRIVER_VERSION;
846
847 if (!arg)
848 return -EINVAL;
849
850 if (copy_to_user
851 (argp, &DriverVer, sizeof(DriverVer_type)))
852 return -EFAULT;
853 return 0;
854 }
855
856 case CCISS_REVALIDVOLS:
857 if (bdev != bdev->bd_contains || drv != host->drv)
858 return -ENXIO;
859 return revalidate_allvol(host);
860
861 case CCISS_GETLUNINFO:{
862 LogvolInfo_struct luninfo;
863
864 luninfo.LunID = drv->LunID;
865 luninfo.num_opens = drv->usage_count;
866 luninfo.num_parts = 0;
867 if (copy_to_user(argp, &luninfo,
868 sizeof(LogvolInfo_struct)))
869 return -EFAULT;
870 return 0;
871 }
872 case CCISS_DEREGDISK:
873 return rebuild_lun_table(host, disk);
874
875 case CCISS_REGNEWD:
876 return rebuild_lun_table(host, NULL);
877
878 case CCISS_PASSTHRU:
879 {
880 IOCTL_Command_struct iocommand;
881 CommandList_struct *c;
882 char *buff = NULL;
883 u64bit temp64;
884 unsigned long flags;
885 DECLARE_COMPLETION_ONSTACK(wait);
886
887 if (!arg)
888 return -EINVAL;
889
890 if (!capable(CAP_SYS_RAWIO))
891 return -EPERM;
892
893 if (copy_from_user
894 (&iocommand, argp, sizeof(IOCTL_Command_struct)))
895 return -EFAULT;
896 if ((iocommand.buf_size < 1) &&
897 (iocommand.Request.Type.Direction != XFER_NONE)) {
898 return -EINVAL;
899 }
900#if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
901 /* Check kmalloc limits */
902 if (iocommand.buf_size > 128000)
903 return -EINVAL;
904#endif
905 if (iocommand.buf_size > 0) {
906 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
907 if (buff == NULL)
908 return -EFAULT;
909 }
910 if (iocommand.Request.Type.Direction == XFER_WRITE) {
911 /* Copy the data into the buffer we created */
912 if (copy_from_user
913 (buff, iocommand.buf, iocommand.buf_size)) {
914 kfree(buff);
915 return -EFAULT;
916 }
917 } else {
918 memset(buff, 0, iocommand.buf_size);
919 }
920 if ((c = cmd_alloc(host, 0)) == NULL) {
921 kfree(buff);
922 return -ENOMEM;
923 }
924 // Fill in the command type
925 c->cmd_type = CMD_IOCTL_PEND;
926 // Fill in Command Header
927 c->Header.ReplyQueue = 0; // unused in simple mode
928 if (iocommand.buf_size > 0) // buffer to fill
929 {
930 c->Header.SGList = 1;
931 c->Header.SGTotal = 1;
932 } else // no buffers to fill
933 {
934 c->Header.SGList = 0;
935 c->Header.SGTotal = 0;
936 }
937 c->Header.LUN = iocommand.LUN_info;
938 c->Header.Tag.lower = c->busaddr; // use the address of the cmd block as the tag
939
940 // Fill in Request block
941 c->Request = iocommand.Request;
942
943 // Fill in the scatter gather information
944 if (iocommand.buf_size > 0) {
945 temp64.val = pci_map_single(host->pdev, buff,
946 iocommand.buf_size,
947 PCI_DMA_BIDIRECTIONAL);
948 c->SG[0].Addr.lower = temp64.val32.lower;
949 c->SG[0].Addr.upper = temp64.val32.upper;
950 c->SG[0].Len = iocommand.buf_size;
951 c->SG[0].Ext = 0; // we are not chaining
952 }
953 c->waiting = &wait;
954
955 /* Put the request on the tail of the request queue */
956 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
957 addQ(&host->reqQ, c);
958 host->Qdepth++;
959 start_io(host);
960 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
961
962 wait_for_completion(&wait);
963
964 /* unlock the buffers from DMA */
965 temp64.val32.lower = c->SG[0].Addr.lower;
966 temp64.val32.upper = c->SG[0].Addr.upper;
967 pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
968 iocommand.buf_size,
969 PCI_DMA_BIDIRECTIONAL);
970
971 /* Copy the error information out */
972 iocommand.error_info = *(c->err_info);
973 if (copy_to_user
974 (argp, &iocommand, sizeof(IOCTL_Command_struct))) {
975 kfree(buff);
976 cmd_free(host, c, 0);
977 return -EFAULT;
978 }
979
980 if (iocommand.Request.Type.Direction == XFER_READ) {
981 /* Copy the data out of the buffer we created */
982 if (copy_to_user
983 (iocommand.buf, buff, iocommand.buf_size)) {
984 kfree(buff);
985 cmd_free(host, c, 0);
986 return -EFAULT;
987 }
988 }
989 kfree(buff);
990 cmd_free(host, c, 0);
991 return 0;
992 }
993 case CCISS_BIG_PASSTHRU:{
994 BIG_IOCTL_Command_struct *ioc;
995 CommandList_struct *c;
996 unsigned char **buff = NULL;
997 int *buff_size = NULL;
998 u64bit temp64;
999 unsigned long flags;
1000 BYTE sg_used = 0;
1001 int status = 0;
1002 int i;
1003 DECLARE_COMPLETION_ONSTACK(wait);
1004 __u32 left;
1005 __u32 sz;
1006 BYTE __user *data_ptr;
1007
1008 if (!arg)
1009 return -EINVAL;
1010 if (!capable(CAP_SYS_RAWIO))
1011 return -EPERM;
1012 ioc = (BIG_IOCTL_Command_struct *)
1013 kmalloc(sizeof(*ioc), GFP_KERNEL);
1014 if (!ioc) {
1015 status = -ENOMEM;
1016 goto cleanup1;
1017 }
1018 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
1019 status = -EFAULT;
1020 goto cleanup1;
1021 }
1022 if ((ioc->buf_size < 1) &&
1023 (ioc->Request.Type.Direction != XFER_NONE)) {
1024 status = -EINVAL;
1025 goto cleanup1;
1026 }
1027 /* Check kmalloc limits using all SGs */
1028 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
1029 status = -EINVAL;
1030 goto cleanup1;
1031 }
1032 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
1033 status = -EINVAL;
1034 goto cleanup1;
1035 }
1036 buff =
1037 kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
1038 if (!buff) {
1039 status = -ENOMEM;
1040 goto cleanup1;
1041 }
1042 buff_size = (int *)kmalloc(MAXSGENTRIES * sizeof(int),
1043 GFP_KERNEL);
1044 if (!buff_size) {
1045 status = -ENOMEM;
1046 goto cleanup1;
1047 }
1048 left = ioc->buf_size;
1049 data_ptr = ioc->buf;
1050 while (left) {
1051 sz = (left >
1052 ioc->malloc_size) ? ioc->
1053 malloc_size : left;
1054 buff_size[sg_used] = sz;
1055 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
1056 if (buff[sg_used] == NULL) {
1057 status = -ENOMEM;
1058 goto cleanup1;
1059 }
1060 if (ioc->Request.Type.Direction == XFER_WRITE) {
1061 if (copy_from_user
1062 (buff[sg_used], data_ptr, sz)) {
1063 status = -ENOMEM;
1064 goto cleanup1;
1065 }
1066 } else {
1067 memset(buff[sg_used], 0, sz);
1068 }
1069 left -= sz;
1070 data_ptr += sz;
1071 sg_used++;
1072 }
1073 if ((c = cmd_alloc(host, 0)) == NULL) {
1074 status = -ENOMEM;
1075 goto cleanup1;
1076 }
1077 c->cmd_type = CMD_IOCTL_PEND;
1078 c->Header.ReplyQueue = 0;
1079
1080 if (ioc->buf_size > 0) {
1081 c->Header.SGList = sg_used;
1082 c->Header.SGTotal = sg_used;
1083 } else {
1084 c->Header.SGList = 0;
1085 c->Header.SGTotal = 0;
1086 }
1087 c->Header.LUN = ioc->LUN_info;
1088 c->Header.Tag.lower = c->busaddr;
1089
1090 c->Request = ioc->Request;
1091 if (ioc->buf_size > 0) {
1092 int i;
1093 for (i = 0; i < sg_used; i++) {
1094 temp64.val =
1095 pci_map_single(host->pdev, buff[i],
1096 buff_size[i],
1097 PCI_DMA_BIDIRECTIONAL);
1098 c->SG[i].Addr.lower =
1099 temp64.val32.lower;
1100 c->SG[i].Addr.upper =
1101 temp64.val32.upper;
1102 c->SG[i].Len = buff_size[i];
1103 c->SG[i].Ext = 0; /* we are not chaining */
1104 }
1105 }
1106 c->waiting = &wait;
1107 /* Put the request on the tail of the request queue */
1108 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1109 addQ(&host->reqQ, c);
1110 host->Qdepth++;
1111 start_io(host);
1112 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1113 wait_for_completion(&wait);
1114 /* unlock the buffers from DMA */
1115 for (i = 0; i < sg_used; i++) {
1116 temp64.val32.lower = c->SG[i].Addr.lower;
1117 temp64.val32.upper = c->SG[i].Addr.upper;
1118 pci_unmap_single(host->pdev,
1119 (dma_addr_t) temp64.val, buff_size[i],
1120 PCI_DMA_BIDIRECTIONAL);
1121 }
1122 /* Copy the error information out */
1123 ioc->error_info = *(c->err_info);
1124 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
1125 cmd_free(host, c, 0);
1126 status = -EFAULT;
1127 goto cleanup1;
1128 }
1129 if (ioc->Request.Type.Direction == XFER_READ) {
1130 /* Copy the data out of the buffer we created */
1131 BYTE __user *ptr = ioc->buf;
1132 for (i = 0; i < sg_used; i++) {
1133 if (copy_to_user
1134 (ptr, buff[i], buff_size[i])) {
1135 cmd_free(host, c, 0);
1136 status = -EFAULT;
1137 goto cleanup1;
1138 }
1139 ptr += buff_size[i];
1140 }
1141 }
1142 cmd_free(host, c, 0);
1143 status = 0;
1144 cleanup1:
1145 if (buff) {
1146 for (i = 0; i < sg_used; i++)
1147 kfree(buff[i]);
1148 kfree(buff);
1149 }
1150 kfree(buff_size);
1151 kfree(ioc);
1152 return status;
1153 }
1154 default:
1155 return -ENOTTY;
1156 }
1157}
1158
1159/*
1160 * revalidate_allvol is for online array config utilities. After a
1161 * utility reconfigures the drives in the array, it can use this function
1162 * (through an ioctl) to make the driver zap any previous disk structs for
1163 * that controller and get new ones.
1164 *
1165 * Right now I'm using the getgeometry() function to do this, but this
1166 * function should probably be finer grained and allow you to revalidate one
1167 * particular logical volume (instead of all of them on a particular
1168 * controller).
1169 */
1170static int revalidate_allvol(ctlr_info_t *host)
1171{
1172 int ctlr = host->ctlr, i;
1173 unsigned long flags;
1174
1175 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1176 if (host->usage_count > 1) {
1177 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1178 printk(KERN_WARNING "cciss: Device busy for volume"
1179 " revalidation (usage=%d)\n", host->usage_count);
1180 return -EBUSY;
1181 }
1182 host->usage_count++;
1183 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1184
1185 for (i = 0; i < NWD; i++) {
1186 struct gendisk *disk = host->gendisk[i];
1187 if (disk) {
1188 request_queue_t *q = disk->queue;
1189
1190 if (disk->flags & GENHD_FL_UP)
1191 del_gendisk(disk);
1192 if (q)
1193 blk_cleanup_queue(q);
1194 }
1195 }
1196
1197 /*
1198 * Set the partition and block size structures for all volumes
1199 * on this controller to zero. We will reread all of this data
1200 */
1201 memset(host->drv, 0, sizeof(drive_info_struct)
1202 * CISS_MAX_LUN);
1203 /*
1204 * Tell the array controller not to give us any interrupts while
1205 * we check the new geometry. Then turn interrupts back on when
1206 * we're done.
1207 */
1208 host->access.set_intr_mask(host, CCISS_INTR_OFF);
1209 cciss_getgeometry(ctlr);
1210 host->access.set_intr_mask(host, CCISS_INTR_ON);
1211
1212 /* Loop through each real device */
1213 for (i = 0; i < NWD; i++) {
1214 struct gendisk *disk = host->gendisk[i];
1215 drive_info_struct *drv = &(host->drv[i]);
1216 /* we must register the controller even if no disks exist */
1217 /* this is for the online array utilities */
1218 if (!drv->heads && i)
1219 continue;
1220 blk_queue_hardsect_size(drv->queue, drv->block_size);
1221 set_capacity(disk, drv->nr_blocks);
1222 add_disk(disk);
1223 }
1224 host->usage_count--;
1225 return 0;
1226}
1227
1228static inline void complete_buffers(struct bio *bio, int status)
1229{
1230 while (bio) {
1231 struct bio *xbh = bio->bi_next;
1232 int nr_sectors = bio_sectors(bio);
1233
1234 bio->bi_next = NULL;
1235 bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO);
1236 bio = xbh;
1237 }
1238}
1239
1240static void cciss_check_queues(ctlr_info_t *h)
1241{
1242 int start_queue = h->next_to_run;
1243 int i;
1244
1245 /* check to see if we have maxed out the number of commands that can
1246 * be placed on the queue. If so then exit. We do this check here
1247 * in case the interrupt we serviced was from an ioctl and did not
1248 * free any new commands.
1249 */
1250 if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds)
1251 return;
1252
1253 /* We have room on the queue for more commands. Now we need to queue
1254 * them up. We will also keep track of the next queue to run so
1255 * that every queue gets a chance to be started first.
1256 */
1257 for (i = 0; i < h->highest_lun + 1; i++) {
1258 int curr_queue = (start_queue + i) % (h->highest_lun + 1);
1259 /* make sure the disk has been added and the drive is real
1260 * because this can be called from the middle of init_one.
1261 */
1262 if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
1263 continue;
1264 blk_start_queue(h->gendisk[curr_queue]->queue);
1265
1266 /* check to see if we have maxed out the number of commands
1267 * that can be placed on the queue.
1268 */
1269 if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) {
1270 if (curr_queue == start_queue) {
1271 h->next_to_run =
1272 (start_queue + 1) % (h->highest_lun + 1);
1273 break;
1274 } else {
1275 h->next_to_run = curr_queue;
1276 break;
1277 }
1278 } else {
1279 curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
1280 }
1281 }
1282}
1283
1284static void cciss_softirq_done(struct request *rq)
1285{
1286 CommandList_struct *cmd = rq->completion_data;
1287 ctlr_info_t *h = hba[cmd->ctlr];
1288 unsigned long flags;
1289 u64bit temp64;
1290 int i, ddir;
1291
1292 if (cmd->Request.Type.Direction == XFER_READ)
1293 ddir = PCI_DMA_FROMDEVICE;
1294 else
1295 ddir = PCI_DMA_TODEVICE;
1296
1297 /* command did not need to be retried */
1298 /* unmap the DMA mapping for all the scatter gather elements */
1299 for (i = 0; i < cmd->Header.SGList; i++) {
1300 temp64.val32.lower = cmd->SG[i].Addr.lower;
1301 temp64.val32.upper = cmd->SG[i].Addr.upper;
1302 pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
1303 }
1304
1305 complete_buffers(rq->bio, rq->errors);
1306
1307 if (blk_fs_request(rq)) {
1308 const int rw = rq_data_dir(rq);
1309
1310 disk_stat_add(rq->rq_disk, sectors[rw], rq->nr_sectors);
1311 }
1312
1313#ifdef CCISS_DEBUG
1314 printk("Done with %p\n", rq);
1315#endif /* CCISS_DEBUG */
1316
1317 add_disk_randomness(rq->rq_disk);
1318 spin_lock_irqsave(&h->lock, flags);
1319 end_that_request_last(rq, rq->errors);
1320 cmd_free(h, cmd, 1);
1321 cciss_check_queues(h);
1322 spin_unlock_irqrestore(&h->lock, flags);
1323}
1324
1325/* This function will check the usage_count of the drive to be updated/added.
1326 * If the usage_count is zero then the drive information will be updated and
1327 * the disk will be re-registered with the kernel. If not then it will be
1328 * left alone for the next reboot. The exception to this is disk 0 which
1329 * will always be left registered with the kernel since it is also the
1330 * controller node. Any changes to disk 0 will show up on the next
1331 * reboot.
1332 */
1333static void cciss_update_drive_info(int ctlr, int drv_index)
1334{
1335 ctlr_info_t *h = hba[ctlr];
1336 struct gendisk *disk;
1337 InquiryData_struct *inq_buff = NULL;
1338 unsigned int block_size;
1339 sector_t total_size;
1340 unsigned long flags = 0;
1341 int ret = 0;
1342
1343 /* if the disk already exists then deregister it before proceeding */
1344 if (h->drv[drv_index].raid_level != -1) {
1345 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1346 h->drv[drv_index].busy_configuring = 1;
1347 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1348 ret = deregister_disk(h->gendisk[drv_index],
1349 &h->drv[drv_index], 0);
1350 h->drv[drv_index].busy_configuring = 0;
1351 }
1352
1353 /* If the disk is in use return */
1354 if (ret)
1355 return;
1356
1357 /* Get information about the disk and modify the driver structure */
1358 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
1359 if (inq_buff == NULL)
1360 goto mem_msg;
1361
1362 cciss_read_capacity(ctlr, drv_index, 1,
1363 &total_size, &block_size);
1364
1365 /* total size = last LBA + 1 */
1366 /* FFFFFFFF + 1 = 0, cannot have a logical volume of size 0 */
1367 /* so we assume this volume must be >2TB in size */
1368 if (total_size == (__u32) 0) {
1369 cciss_read_capacity_16(ctlr, drv_index, 1,
1370 &total_size, &block_size);
1371 h->cciss_read = CCISS_READ_16;
1372 h->cciss_write = CCISS_WRITE_16;
1373 } else {
1374 h->cciss_read = CCISS_READ_10;
1375 h->cciss_write = CCISS_WRITE_10;
1376 }
1377 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
1378 inq_buff, &h->drv[drv_index]);
1379
1380 ++h->num_luns;
1381 disk = h->gendisk[drv_index];
1382 set_capacity(disk, h->drv[drv_index].nr_blocks);
1383
1384 /* if it's the controller it's already added */
1385 if (drv_index) {
1386 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1387
1388 /* Set up queue information */
1389 disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
1390 blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
1391
1392 /* This is a hardware imposed limit. */
1393 blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
1394
1395 /* This is a limit in the driver and could be eliminated. */
1396 blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
1397
1398 blk_queue_max_sectors(disk->queue, 512);
1399
1400 blk_queue_softirq_done(disk->queue, cciss_softirq_done);
1401
1402 disk->queue->queuedata = hba[ctlr];
1403
1404 blk_queue_hardsect_size(disk->queue,
1405 hba[ctlr]->drv[drv_index].block_size);
1406
1407 h->drv[drv_index].queue = disk->queue;
1408 add_disk(disk);
1409 }
1410
1411 freeret:
1412 kfree(inq_buff);
1413 return;
1414 mem_msg:
1415 printk(KERN_ERR "cciss: out of memory\n");
1416 goto freeret;
1417}
1418
1419/* This function will find the first index of the controller's drive array
1420 * that has a -1 for the raid_level and will return that index. This is
1421 * where new drives will be added. If the index to be returned is greater
1422 * than the highest_lun index for the controller then highest_lun is set
1423 * to this new index. If there are no available indexes then -1 is returned.
1424 */
1425static int cciss_find_free_drive_index(int ctlr)
1426{
1427 int i;
1428
1429 for (i = 0; i < CISS_MAX_LUN; i++) {
1430 if (hba[ctlr]->drv[i].raid_level == -1) {
1431 if (i > hba[ctlr]->highest_lun)
1432 hba[ctlr]->highest_lun = i;
1433 return i;
1434 }
1435 }
1436 return -1;
1437}
1438
1439/* This function will add and remove logical drives from the Logical
1440 * drive array of the controller and maintain persistence of ordering
1441 * so that mount points are preserved until the next reboot. This allows
1442 * for the removal of logical drives in the middle of the drive array
1443 * without a re-ordering of those drives.
1444 * INPUT
1445 * h = The controller to perform the operations on
1446 * del_disk = The disk to remove if specified. If the value given
1447 * is NULL then no disk is removed.
1448 */
1449static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1450{
1451 int ctlr = h->ctlr;
1452 int num_luns;
1453 ReportLunData_struct *ld_buff = NULL;
1454 drive_info_struct *drv = NULL;
1455 int return_code;
1456 int listlength = 0;
1457 int i;
1458 int drv_found;
1459 int drv_index = 0;
1460 __u32 lunid = 0;
1461 unsigned long flags;
1462
1463 /* Set busy_configuring flag for this operation */
1464 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1465 if (h->num_luns >= CISS_MAX_LUN) {
1466 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1467 return -EINVAL;
1468 }
1469
1470 if (h->busy_configuring) {
1471 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1472 return -EBUSY;
1473 }
1474 h->busy_configuring = 1;
1475
1476 /* if del_disk is NULL then we are being called to add a new disk
1477 * and update the logical drive table. If it is not NULL then
1478 * we will check if the disk is in use or not.
1479 */
1480 if (del_disk != NULL) {
1481 drv = get_drv(del_disk);
1482 drv->busy_configuring = 1;
1483 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1484 return_code = deregister_disk(del_disk, drv, 1);
1485 drv->busy_configuring = 0;
1486 h->busy_configuring = 0;
1487 return return_code;
1488 } else {
1489 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1490 if (!capable(CAP_SYS_RAWIO))
1491 return -EPERM;
1492
1493 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1494 if (ld_buff == NULL)
1495 goto mem_msg;
1496
1497 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1498 sizeof(ReportLunData_struct), 0,
1499 0, 0, TYPE_CMD);
1500
1501 if (return_code == IO_OK) {
1502 listlength |=
1503 (0xff & (unsigned int)(ld_buff->LUNListLength[0]))
1504 << 24;
1505 listlength |=
1506 (0xff & (unsigned int)(ld_buff->LUNListLength[1]))
1507 << 16;
1508 listlength |=
1509 (0xff & (unsigned int)(ld_buff->LUNListLength[2]))
1510 << 8;
1511 listlength |=
1512 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
1513 } else { /* reading number of logical volumes failed */
1514 printk(KERN_WARNING "cciss: report logical volume"
1515 " command failed\n");
1516 listlength = 0;
1517 goto freeret;
1518 }
1519
1520 num_luns = listlength / 8; /* 8 bytes per entry */
1521 if (num_luns > CISS_MAX_LUN) {
1522 num_luns = CISS_MAX_LUN;
1523 printk(KERN_WARNING "cciss: more luns configured"
1524 " on controller than can be handled by"
1525 " this driver.\n");
1526 }
1527
1528 /* Compare the controller's drive array to the driver's drive array.
1529 * Check for updates in the drive information and any new drives
1530 * on the controller.
1531 */
1532 for (i = 0; i < num_luns; i++) {
1533 int j;
1534
1535 drv_found = 0;
1536
1537 lunid = (0xff &
1538 (unsigned int)(ld_buff->LUN[i][3])) << 24;
1539 lunid |= (0xff &
1540 (unsigned int)(ld_buff->LUN[i][2])) << 16;
1541 lunid |= (0xff &
1542 (unsigned int)(ld_buff->LUN[i][1])) << 8;
1543 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
1544
1545 /* Find if the LUN is already in the drive array
1546 * of the controller. If so then update its info
1547 * if it is not in use. If it does not exist then find
1548 * the first free index and add it.
1549 */
1550 for (j = 0; j <= h->highest_lun; j++) {
1551 if (h->drv[j].LunID == lunid) {
1552 drv_index = j;
1553 drv_found = 1;
1554 }
1555 }
1556
1557 /* check if the drive was found already in the array */
1558 if (!drv_found) {
1559 drv_index = cciss_find_free_drive_index(ctlr);
1560 if (drv_index == -1)
1561 goto freeret;
1562
1563 }
1564 h->drv[drv_index].LunID = lunid;
1565 cciss_update_drive_info(ctlr, drv_index);
1566 } /* end for */
1567 } /* end else */
1568
1569 freeret:
1570 kfree(ld_buff);
1571 h->busy_configuring = 0;
1572 /* We return -1 here to tell the ACU that we have registered/updated
1573 * all of the drives that we can and to keep it from calling us
1574 * additional times.
1575 */
1576 return -1;
1577 mem_msg:
1578 printk(KERN_ERR "cciss: out of memory\n");
1579 goto freeret;
1580}
1581
1582/* This function will deregister the disk and its queue from the
1583 * kernel. It must be called with the controller lock held and the
1584 * drv structure's busy_configuring flag set. Its parameters are:
1585 *
1586 * disk = This is the disk to be deregistered
1587 * drv = This is the drive_info_struct associated with the disk to be
1588 * deregistered. It contains information about the disk used
1589 * by the driver.
1590 * clear_all = This flag determines whether or not the disk information
1591 * is going to be completely cleared out and the highest_lun
1592 * reset. Sometimes we want to clear out information about
1593 * the disk in preparation for re-adding it. In this case
1594 * the highest_lun should be left unchanged and the LunID
1595 * should not be cleared.
1596*/
1597static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1598 int clear_all)
1599{
1600 ctlr_info_t *h = get_host(disk);
1601
1602 if (!capable(CAP_SYS_RAWIO))
1603 return -EPERM;
1604
1605 /* make sure logical volume is NOT in use */
1606 if (clear_all || (h->gendisk[0] == disk)) {
1607 if (drv->usage_count > 1)
1608 return -EBUSY;
1609 } else if (drv->usage_count > 0)
1610 return -EBUSY;
1611
1612 /* invalidate the devices and deregister the disk. If it is disk
1613 * zero, do not deregister it but just zero out its values. This
1614 * allows us to delete disk zero but keep the controller registered.
1615 */
1616 if (h->gendisk[0] != disk) {
1617 if (disk) {
1618 request_queue_t *q = disk->queue;
1619 if (disk->flags & GENHD_FL_UP)
1620 del_gendisk(disk);
1621 if (q) {
1622 blk_cleanup_queue(q);
1623 drv->queue = NULL;
1624 }
1625 }
1626 }
1627
1628 --h->num_luns;
1629 /* zero out the disk size info */
1630 drv->nr_blocks = 0;
1631 drv->block_size = 0;
1632 drv->heads = 0;
1633 drv->sectors = 0;
1634 drv->cylinders = 0;
1635 drv->raid_level = -1; /* This can be used as a flag variable to
1636 * indicate that this element of the drive
1637 * array is free.
1638 */
1639
1640 if (clear_all) {
1641 /* check to see if it was the last disk */
1642 if (drv == h->drv + h->highest_lun) {
1643 /* if so, find the new highest lun */
1644 int i, newhighest = -1;
1645 for (i = 0; i < h->highest_lun; i++) {
1646 /* if the disk has size > 0, it is available */
1647 if (h->drv[i].heads)
1648 newhighest = i;
1649 }
1650 h->highest_lun = newhighest;
1651 }
1652
1653 drv->LunID = 0;
1654 }
1655 return 0;
1656}
1657
1658static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
1659 1: address logical volume log_unit,
1660 2: periph device address is scsi3addr */
1661 unsigned int log_unit, __u8 page_code,
1662 unsigned char *scsi3addr, int cmd_type)
1663{
1664 ctlr_info_t *h = hba[ctlr];
1665 u64bit buff_dma_handle;
1666 int status = IO_OK;
1667
1668 c->cmd_type = CMD_IOCTL_PEND;
1669 c->Header.ReplyQueue = 0;
1670 if (buff != NULL) {
1671 c->Header.SGList = 1;
1672 c->Header.SGTotal = 1;
1673 } else {
1674 c->Header.SGList = 0;
1675 c->Header.SGTotal = 0;
1676 }
1677 c->Header.Tag.lower = c->busaddr;
1678
1679 c->Request.Type.Type = cmd_type;
1680 if (cmd_type == TYPE_CMD) {
1681 switch (cmd) {
1682 case CISS_INQUIRY:
1683 /* If the logical unit number is 0, then this is going
1684 to the controller, so it's a physical command:
1685 mode = 0, target = 0. So we have nothing to write.
1686 otherwise, if use_unit_num == 1,
1687 mode = 1(volume set addressing) target = LUNID
1688 otherwise, if use_unit_num == 2,
1689 mode = 0(periph dev addr) target = scsi3addr */
1690 if (use_unit_num == 1) {
1691 c->Header.LUN.LogDev.VolId =
1692 h->drv[log_unit].LunID;
1693 c->Header.LUN.LogDev.Mode = 1;
1694 } else if (use_unit_num == 2) {
1695 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr,
1696 8);
1697 c->Header.LUN.LogDev.Mode = 0;
1698 }
1699 /* are we trying to read a vital product page */
1700 if (page_code != 0) {
1701 c->Request.CDB[1] = 0x01;
1702 c->Request.CDB[2] = page_code;
1703 }
1704 c->Request.CDBLen = 6;
1705 c->Request.Type.Attribute = ATTR_SIMPLE;
1706 c->Request.Type.Direction = XFER_READ;
1707 c->Request.Timeout = 0;
1708 c->Request.CDB[0] = CISS_INQUIRY;
1709 c->Request.CDB[4] = size & 0xFF;
1710 break;
1711 case CISS_REPORT_LOG:
1712 case CISS_REPORT_PHYS:
1713 /* Talking to the controller, so it's a physical command:
1714 mode = 00, target = 0. Nothing to write.
1715 */
1716 c->Request.CDBLen = 12;
1717 c->Request.Type.Attribute = ATTR_SIMPLE;
1718 c->Request.Type.Direction = XFER_READ;
1719 c->Request.Timeout = 0;
1720 c->Request.CDB[0] = cmd;
1721 c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
1722 c->Request.CDB[7] = (size >> 16) & 0xFF;
1723 c->Request.CDB[8] = (size >> 8) & 0xFF;
1724 c->Request.CDB[9] = size & 0xFF;
1725 break;
1726
1727 case CCISS_READ_CAPACITY:
1728 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1729 c->Header.LUN.LogDev.Mode = 1;
1730 c->Request.CDBLen = 10;
1731 c->Request.Type.Attribute = ATTR_SIMPLE;
1732 c->Request.Type.Direction = XFER_READ;
1733 c->Request.Timeout = 0;
1734 c->Request.CDB[0] = cmd;
1735 break;
1736 case CCISS_READ_CAPACITY_16:
1737 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1738 c->Header.LUN.LogDev.Mode = 1;
1739 c->Request.CDBLen = 16;
1740 c->Request.Type.Attribute = ATTR_SIMPLE;
1741 c->Request.Type.Direction = XFER_READ;
1742 c->Request.Timeout = 0;
1743 c->Request.CDB[0] = cmd;
1744 c->Request.CDB[1] = 0x10;
1745 c->Request.CDB[10] = (size >> 24) & 0xFF;
1746 c->Request.CDB[11] = (size >> 16) & 0xFF;
1747 c->Request.CDB[12] = (size >> 8) & 0xFF;
1748 c->Request.CDB[13] = size & 0xFF;
1749 c->Request.Timeout = 0;
1750 c->Request.CDB[0] = cmd;
1751 break;
1752 case CCISS_CACHE_FLUSH:
1753 c->Request.CDBLen = 12;
1754 c->Request.Type.Attribute = ATTR_SIMPLE;
1755 c->Request.Type.Direction = XFER_WRITE;
1756 c->Request.Timeout = 0;
1757 c->Request.CDB[0] = BMIC_WRITE;
1758 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
1759 break;
1760 default:
1761 printk(KERN_WARNING
1762 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
1763 return IO_ERROR;
1764 }
1765 } else if (cmd_type == TYPE_MSG) {
1766 switch (cmd) {
1767 case 0: /* ABORT message */
1768 c->Request.CDBLen = 12;
1769 c->Request.Type.Attribute = ATTR_SIMPLE;
1770 c->Request.Type.Direction = XFER_WRITE;
1771 c->Request.Timeout = 0;
1772 c->Request.CDB[0] = cmd; /* abort */
1773 c->Request.CDB[1] = 0; /* abort a command */
1774 /* buff contains the tag of the command to abort */
1775 memcpy(&c->Request.CDB[4], buff, 8);
1776 break;
1777 case 1: /* RESET message */
1778 c->Request.CDBLen = 12;
1779 c->Request.Type.Attribute = ATTR_SIMPLE;
1780 c->Request.Type.Direction = XFER_WRITE;
1781 c->Request.Timeout = 0;
1782 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
1783 c->Request.CDB[0] = cmd; /* reset */
1784 c->Request.CDB[1] = 0x04; /* reset a LUN */
1785 break;
1786 case 3: /* No-Op message */
1787 c->Request.CDBLen = 1;
1788 c->Request.Type.Attribute = ATTR_SIMPLE;
1789 c->Request.Type.Direction = XFER_WRITE;
1790 c->Request.Timeout = 0;
1791 c->Request.CDB[0] = cmd;
1792 break;
1793 default:
1794 printk(KERN_WARNING
1795 "cciss%d: unknown message type %d\n", ctlr, cmd);
1796 return IO_ERROR;
1797 }
1798 } else {
1799 printk(KERN_WARNING
1800 "cciss%d: unknown command type %d\n", ctlr, cmd_type);
1801 return IO_ERROR;
1802 }
1803 /* Fill in the scatter gather information */
1804 if (size > 0) {
1805 buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
1806 buff, size,
1807 PCI_DMA_BIDIRECTIONAL);
1808 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
1809 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
1810 c->SG[0].Len = size;
1811 c->SG[0].Ext = 0; /* we are not chaining */
1812 }
1813 return status;
1814}
1815
1816static int sendcmd_withirq(__u8 cmd,
1817 int ctlr,
1818 void *buff,
1819 size_t size,
1820 unsigned int use_unit_num,
1821 unsigned int log_unit, __u8 page_code, int cmd_type)
1822{
1823 ctlr_info_t *h = hba[ctlr];
1824 CommandList_struct *c;
1825 u64bit buff_dma_handle;
1826 unsigned long flags;
1827 int return_status;
1828 DECLARE_COMPLETION_ONSTACK(wait);
1829
1830 if ((c = cmd_alloc(h, 0)) == NULL)
1831 return -ENOMEM;
1832 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1833 log_unit, page_code, NULL, cmd_type);
1834 if (return_status != IO_OK) {
1835 cmd_free(h, c, 0);
1836 return return_status;
1837 }
1838 resend_cmd2:
1839 c->waiting = &wait;
1840
1841 /* Put the request on the tail of the queue and send it */
1842 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1843 addQ(&h->reqQ, c);
1844 h->Qdepth++;
1845 start_io(h);
1846 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1847
1848 wait_for_completion(&wait);
1849
1850 if (c->err_info->CommandStatus != 0) { /* an error has occurred */
1851 switch (c->err_info->CommandStatus) {
1852 case CMD_TARGET_STATUS:
1853 printk(KERN_WARNING "cciss: cmd %p has "
1854 " completed with errors\n", c);
1855 if (c->err_info->ScsiStatus) {
1856 printk(KERN_WARNING "cciss: cmd %p "
1857 "has SCSI Status = %x\n",
1858 c, c->err_info->ScsiStatus);
1859 }
1860
1861 break;
1862 case CMD_DATA_UNDERRUN:
1863 case CMD_DATA_OVERRUN:
1864 /* expected for inquire and report lun commands */
1865 break;
1866 case CMD_INVALID:
1867 printk(KERN_WARNING "cciss: Cmd %p is "
1868 "reported invalid\n", c);
1869 return_status = IO_ERROR;
1870 break;
1871 case CMD_PROTOCOL_ERR:
1872 printk(KERN_WARNING "cciss: cmd %p has "
1873 "protocol error \n", c);
1874 return_status = IO_ERROR;
1875 break;
1876 case CMD_HARDWARE_ERR:
1877 printk(KERN_WARNING "cciss: cmd %p had "
1878 " hardware error\n", c);
1879 return_status = IO_ERROR;
1880 break;
1881 case CMD_CONNECTION_LOST:
1882 printk(KERN_WARNING "cciss: cmd %p had "
1883 "connection lost\n", c);
1884 return_status = IO_ERROR;
1885 break;
1886 case CMD_ABORTED:
1887 printk(KERN_WARNING "cciss: cmd %p was "
1888 "aborted\n", c);
1889 return_status = IO_ERROR;
1890 break;
1891 case CMD_ABORT_FAILED:
1892 printk(KERN_WARNING "cciss: cmd %p reports "
1893 "abort failed\n", c);
1894 return_status = IO_ERROR;
1895 break;
1896 case CMD_UNSOLICITED_ABORT:
1897 printk(KERN_WARNING
1898 "cciss%d: unsolicited abort %p\n", ctlr, c);
1899 if (c->retry_count < MAX_CMD_RETRIES) {
1900 printk(KERN_WARNING
1901 "cciss%d: retrying %p\n", ctlr, c);
1902 c->retry_count++;
1903 /* erase the old error information */
1904 memset(c->err_info, 0,
1905 sizeof(ErrorInfo_struct));
1906 return_status = IO_OK;
1907 INIT_COMPLETION(wait);
1908 goto resend_cmd2;
1909 }
1910 return_status = IO_ERROR;
1911 break;
1912 default:
1913 printk(KERN_WARNING "cciss: cmd %p returned "
1914 "unknown status %x\n", c,
1915 c->err_info->CommandStatus);
1916 return_status = IO_ERROR;
1917 }
1918 }
1919 /* unlock the buffers from DMA */
1920 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
1921 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
1922 pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
1923 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
1924 cmd_free(h, c, 0);
1925 return return_status;
1926}
1927
1928static void cciss_geometry_inquiry(int ctlr, int logvol,
1929 int withirq, sector_t total_size,
1930 unsigned int block_size,
1931 InquiryData_struct *inq_buff,
1932 drive_info_struct *drv)
1933{
1934 int return_code;
1935 unsigned long t;
1936
1937 memset(inq_buff, 0, sizeof(InquiryData_struct));
1938 if (withirq)
1939 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
1940 inq_buff, sizeof(*inq_buff), 1,
1941 logvol, 0xC1, TYPE_CMD);
1942 else
1943 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
1944 sizeof(*inq_buff), 1, logvol, 0xC1, NULL,
1945 TYPE_CMD);
1946 if (return_code == IO_OK) {
1947 if (inq_buff->data_byte[8] == 0xFF) {
1948 printk(KERN_WARNING
1949 "cciss: reading geometry failed, volume "
1950 "does not support reading geometry\n");
1951 drv->heads = 255;
1952 drv->sectors = 32; // Sectors per track
1953 } else {
1954 drv->heads = inq_buff->data_byte[6];
1955 drv->sectors = inq_buff->data_byte[7];
1956 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
1957 drv->cylinders += inq_buff->data_byte[5];
1958 drv->raid_level = inq_buff->data_byte[8];
1959 }
1960 drv->block_size = block_size;
1961 drv->nr_blocks = total_size;
1962 t = drv->heads * drv->sectors;
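/* Derive cylinders from capacity: total blocks divided by (heads * sectors),
 * rounded up. sector_div() divides total_size in place and returns the remainder. */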
1963 if (t > 1) {
1964 unsigned rem = sector_div(total_size, t);
1965 if (rem)
1966 total_size++;
1967 drv->cylinders = total_size;
1968 }
1969 } else { /* Get geometry failed */
1970 printk(KERN_WARNING "cciss: reading geometry failed\n");
1971 }
1972 printk(KERN_INFO " heads=%d, sectors=%d, cylinders=%d\n\n",
1973 drv->heads, drv->sectors, drv->cylinders);
1974}
1975
1976static void
1977cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
1978 unsigned int *block_size)
1979{
1980 ReadCapdata_struct *buf;
1981 int return_code;
1982 buf = kmalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
1983 if (buf == NULL) {
1984 printk(KERN_WARNING "cciss: out of memory\n");
1985 return;
1986 }
1987 memset(buf, 0, sizeof(ReadCapdata_struct));
1988 if (withirq)
1989 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
1990 ctlr, buf, sizeof(ReadCapdata_struct),
1991 1, logvol, 0, TYPE_CMD);
1992 else
1993 return_code = sendcmd(CCISS_READ_CAPACITY,
1994 ctlr, buf, sizeof(ReadCapdata_struct),
1995 1, logvol, 0, NULL, TYPE_CMD);
1996 if (return_code == IO_OK) {
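/* READ CAPACITY returns the last addressable LBA in big-endian form;
 * add one to get the number of blocks. */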
1997 *total_size = be32_to_cpu(*(__u32 *) buf->total_size)+1;
1998 *block_size = be32_to_cpu(*(__u32 *) buf->block_size);
1999 } else { /* read capacity command failed */
2000 printk(KERN_WARNING "cciss: read capacity failed\n");
2001 *total_size = 0;
2002 *block_size = BLOCK_SIZE;
2003 }
2004 if (*total_size != (__u32) 0)
2005 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2006 (unsigned long long)*total_size, *block_size);
2007 kfree(buf);
2008 return;
2009}
2010
2011static void
2012cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size, unsigned int *block_size)
2013{
2014 ReadCapdata_struct_16 *buf;
2015 int return_code;
2016 buf = kmalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
2017 if (buf == NULL) {
2018 printk(KERN_WARNING "cciss: out of memory\n");
2019 return;
2020 }
2021 memset(buf, 0, sizeof(ReadCapdata_struct_16));
2022 if (withirq) {
2023 return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
2024 ctlr, buf, sizeof(ReadCapdata_struct_16),
2025 1, logvol, 0, TYPE_CMD);
2026 }
2027 else {
2028 return_code = sendcmd(CCISS_READ_CAPACITY_16,
2029 ctlr, buf, sizeof(ReadCapdata_struct_16),
2030 1, logvol, 0, NULL, TYPE_CMD);
2031 }
2032 if (return_code == IO_OK) {
2033 *total_size = be64_to_cpu(*(__u64 *) buf->total_size)+1;
2034 *block_size = be32_to_cpu(*(__u32 *) buf->block_size);
2035 } else { /* read capacity command failed */
2036 printk(KERN_WARNING "cciss: read capacity failed\n");
2037 *total_size = 0;
2038 *block_size = BLOCK_SIZE;
2039 }
2040 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2041 (unsigned long long)*total_size, *block_size);
2042 kfree(buf);
2043 return;
2044}
2045
2046static int cciss_revalidate(struct gendisk *disk)
2047{
2048 ctlr_info_t *h = get_host(disk);
2049 drive_info_struct *drv = get_drv(disk);
2050 int logvol;
2051 int FOUND = 0;
2052 unsigned int block_size;
2053 sector_t total_size;
2054 InquiryData_struct *inq_buff = NULL;
2055
2056 for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
2057 if (h->drv[logvol].LunID == drv->LunID) {
2058 FOUND = 1;
2059 break;
2060 }
2061 }
2062
2063 if (!FOUND)
2064 return 1;
2065
2066 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
2067 if (inq_buff == NULL) {
2068 printk(KERN_WARNING "cciss: out of memory\n");
2069 return 1;
2070 }
2071 if (h->cciss_read == CCISS_READ_10) {
2072 cciss_read_capacity(h->ctlr, logvol, 1,
2073 &total_size, &block_size);
2074 } else {
2075 cciss_read_capacity_16(h->ctlr, logvol, 1,
2076 &total_size, &block_size);
2077 }
2078 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
2079 inq_buff, drv);
2080
2081 blk_queue_hardsect_size(drv->queue, drv->block_size);
2082 set_capacity(disk, drv->nr_blocks);
2083
2084 kfree(inq_buff);
2085 return 0;
2086}
2087
2088/*
2089 * Wait polling for a command to complete.
2090 * The memory mapped FIFO is polled for the completion.
2091 * Used only at init time, interrupts from the HBA are disabled.
2092 */
2093static unsigned long pollcomplete(int ctlr)
2094{
2095 unsigned long done;
2096 int i;
2097
2098 /* Wait (up to 20 seconds) for a command to complete */
2099
2100 for (i = 20 * HZ; i > 0; i--) {
2101 done = hba[ctlr]->access.command_completed(hba[ctlr]);
2102 if (done == FIFO_EMPTY)
2103 schedule_timeout_uninterruptible(1);
2104 else
2105 return done;
2106 }
2107 /* Invalid address to tell caller we ran out of time */
2108 return 1;
2109}
2110
2111static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
2112{
2113 /* We get in here if sendcmd() is polling for completions
2114 and gets some command back that it wasn't expecting --
2115 something other than that which it just sent down.
2116 Ordinarily, that shouldn't happen, but it can happen when
2117 the scsi tape stuff gets into error handling mode, and
2118 starts using sendcmd() to try to abort commands and
2119 reset tape drives. In that case, sendcmd may pick up
2120 completions of commands that were sent to logical drives
2121 through the block i/o system, or cciss ioctls completing, etc.
2122 In that case, we need to save those completions for later
2123 processing by the interrupt handler.
2124 */
2125
2126#ifdef CONFIG_CISS_SCSI_TAPE
2127 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
2128
2129 /* If it's not the SCSI tape code doing error handling (abort */
2130 /* or reset), then we don't expect anything weird. */
2131 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
2132#endif
2133 printk(KERN_WARNING "cciss%d: SendCmd "
2134 "Invalid command list address returned! (%lx)\n",
2135 ctlr, complete);
2136 /* not much we can do. */
2137#ifdef CONFIG_CISS_SCSI_TAPE
2138 return 1;
2139 }
2140
2141 /* We've sent down an abort or reset, but something else
2142 has completed */
2143 if (srl->ncompletions >= (hba[ctlr]->nr_cmds + 2)) {
2144 /* Uh oh. No room to save it for later... */
2145 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
2146 "reject list overflow, command lost!\n", ctlr);
2147 return 1;
2148 }
2149 /* Save it for later */
2150 srl->complete[srl->ncompletions] = complete;
2151 srl->ncompletions++;
2152#endif
2153 return 0;
2154}
2155
2156/*
2157 * Send a command to the controller, and wait for it to complete.
2158 * Only used at init time.
2159 */
2160static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
2161 1: address logical volume log_unit,
2162 2: periph device address is scsi3addr */
2163 unsigned int log_unit,
2164 __u8 page_code, unsigned char *scsi3addr, int cmd_type)
2165{
2166 CommandList_struct *c;
2167 int i;
2168 unsigned long complete;
2169 ctlr_info_t *info_p = hba[ctlr];
2170 u64bit buff_dma_handle;
2171 int status, done = 0;
2172
2173 if ((c = cmd_alloc(info_p, 1)) == NULL) {
2174 printk(KERN_WARNING "cciss: unable to get memory\n");
2175 return IO_ERROR;
2176 }
2177 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
2178 log_unit, page_code, scsi3addr, cmd_type);
2179 if (status != IO_OK) {
2180 cmd_free(info_p, c, 1);
2181 return status;
2182 }
2183 resend_cmd1:
2184 /*
2185 * Disable interrupt
2186 */
2187#ifdef CCISS_DEBUG
2188 printk(KERN_DEBUG "cciss: turning intr off\n");
2189#endif /* CCISS_DEBUG */
2190 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
2191
2192 /* Make sure there is room in the command FIFO */
2193 /* Actually it should be completely empty at this time */
2194 /* unless we are in here doing error handling for the scsi */
2195 /* tape side of the driver. */
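 /* 200000 polls with a 10 microsecond delay bounds this wait at roughly two seconds. */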
2196 for (i = 200000; i > 0; i--) {
2197 /* if fifo isn't full go */
2198 if (!(info_p->access.fifo_full(info_p))) {
2199
2200 break;
2201 }
2202 udelay(10);
2203 printk(KERN_WARNING "cciss%d: SendCmd FIFO full,"
2204 " waiting!\n", ctlr);
2205 }
2206 /*
2207 * Send the cmd
2208 */
2209 info_p->access.submit_command(info_p, c);
2210 done = 0;
2211 do {
2212 complete = pollcomplete(ctlr);
2213
2214#ifdef CCISS_DEBUG
2215 printk(KERN_DEBUG "cciss: command completed\n");
2216#endif /* CCISS_DEBUG */
2217
2218 if (complete == 1) {
2219 printk(KERN_WARNING
2220 "cciss cciss%d: SendCmd Timeout out, "
2221 "No command list address returned!\n", ctlr);
2222 status = IO_ERROR;
2223 done = 1;
2224 break;
2225 }
2226
2227 /* This will need to change for direct lookup completions */
2228 if ((complete & CISS_ERROR_BIT)
2229 && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
2230 /* if data overrun or underrun on a report or
2231 inquiry command, ignore it
2232 */
2233 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2234 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2235 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2236 ((c->err_info->CommandStatus ==
2237 CMD_DATA_OVERRUN) ||
2238 (c->err_info->CommandStatus == CMD_DATA_UNDERRUN)
2239 )) {
2240 complete = c->busaddr;
2241 } else {
2242 if (c->err_info->CommandStatus ==
2243 CMD_UNSOLICITED_ABORT) {
2244 printk(KERN_WARNING "cciss%d: "
2245 "unsolicited abort %p\n",
2246 ctlr, c);
2247 if (c->retry_count < MAX_CMD_RETRIES) {
2248 printk(KERN_WARNING
2249 "cciss%d: retrying %p\n",
2250 ctlr, c);
2251 c->retry_count++;
2252 /* erase the old error */
2253 /* information */
2254 memset(c->err_info, 0,
2255 sizeof
2256 (ErrorInfo_struct));
2257 goto resend_cmd1;
2258 } else {
2259 printk(KERN_WARNING
2260 "cciss%d: retried %p too "
2261 "many times\n", ctlr, c);
2262 status = IO_ERROR;
2263 goto cleanup1;
2264 }
2265 } else if (c->err_info->CommandStatus ==
2266 CMD_UNABORTABLE) {
2267 printk(KERN_WARNING
2268 "cciss%d: command could not be aborted.\n",
2269 ctlr);
2270 status = IO_ERROR;
2271 goto cleanup1;
2272 }
2273 printk(KERN_WARNING "cciss%d: sendcmd"
2274 " error %x\n", ctlr,
2275 c->err_info->CommandStatus);
2276 printk(KERN_WARNING "cciss%d: sendcmd"
2277 " offensive info\n"
2278 " size %x\n num %x value %x\n",
2279 ctlr,
2280 c->err_info->MoreErrInfo.Invalid_Cmd.
2281 offense_size,
2282 c->err_info->MoreErrInfo.Invalid_Cmd.
2283 offense_num,
2284 c->err_info->MoreErrInfo.Invalid_Cmd.
2285 offense_value);
2286 status = IO_ERROR;
2287 goto cleanup1;
2288 }
2289 }
2290 /* This will need changing for direct lookup completions */
2291 if (complete != c->busaddr) {
2292 if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
2293 BUG(); /* we are pretty much hosed if we get here. */
2294 }
2295 continue;
2296 } else
2297 done = 1;
2298 } while (!done);
2299
2300 cleanup1:
2301 /* unlock the data buffer from DMA */
2302 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2303 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2304 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
2305 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2306#ifdef CONFIG_CISS_SCSI_TAPE
2307 /* if we saved some commands for later, process them now. */
2308 if (info_p->scsi_rejects.ncompletions > 0)
2309 do_cciss_intr(0, info_p);
2310#endif
2311 cmd_free(info_p, c, 1);
2312 return status;
2313}
2314
2315/*
2316 * Map (physical) PCI mem into (virtual) kernel space
2317 */
2318static void __iomem *remap_pci_mem(ulong base, ulong size)
2319{
2320 ulong page_base = ((ulong) base) & PAGE_MASK;
2321 ulong page_offs = ((ulong) base) - page_base;
2322 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
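 /* Map from the containing page boundary and return a pointer adjusted by the
  * original offset within that page, so callers get the exact address requested. */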
2323
2324 return page_remapped ? (page_remapped + page_offs) : NULL;
2325}
2326
2327/*
2328 * Takes jobs off the request queue, sends them to the hardware, then puts
2329 * them on the completion queue to wait for completion.
2330 */
2331static void start_io(ctlr_info_t *h)
2332{
2333 CommandList_struct *c;
2334
2335 while ((c = h->reqQ) != NULL) {
2336 /* can't do anything if fifo is full */
2337 if ((h->access.fifo_full(h))) {
2338 printk(KERN_WARNING "cciss: fifo full\n");
2339 break;
2340 }
2341
2342 /* Get the first entry from the Request Q */
2343 removeQ(&(h->reqQ), c);
2344 h->Qdepth--;
2345
2346 /* Tell the controller execute command */
2347 h->access.submit_command(h, c);
2348
2349 /* Put job onto the completed Q */
2350 addQ(&(h->cmpQ), c);
2351 }
2352}
2353
2354/* Assumes that CCISS_LOCK(h->ctlr) is held. */
2355/* Zeros out the error record and then resends the command back */
2356/* to the controller */
2357static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
2358{
2359 /* erase the old error information */
2360 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2361
2362 /* add it to software queue and then send it to the controller */
2363 addQ(&(h->reqQ), c);
2364 h->Qdepth++;
2365 if (h->Qdepth > h->maxQsinceinit)
2366 h->maxQsinceinit = h->Qdepth;
2367
2368 start_io(h);
2369}
2370
2371/* checks the status of the job and calls complete buffers to mark all
2372 * buffers for the completed job. Note that this function does not need
2373 * to hold the hba/queue lock.
2374 */
2375static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
2376 int timeout)
2377{
2378 int status = 1;
2379 int retry_cmd = 0;
2380
2381 if (timeout)
2382 status = 0;
2383
2384 if (cmd->err_info->CommandStatus != 0) { /* an error has occurred */
2385 switch (cmd->err_info->CommandStatus) {
2386 unsigned char sense_key;
2387 case CMD_TARGET_STATUS:
2388 status = 0;
2389
2390 if (cmd->err_info->ScsiStatus == 0x02) {
2391 printk(KERN_WARNING "cciss: cmd %p "
2392 "has CHECK CONDITION "
2393 " byte 2 = 0x%x\n", cmd,
2394 cmd->err_info->SenseInfo[2]
2395 );
2396 /* check the sense key */
2397 sense_key = 0xf & cmd->err_info->SenseInfo[2];
2398 /* no status or recovered error */
2399 if ((sense_key == 0x0) || (sense_key == 0x1)) {
2400 status = 1;
2401 }
2402 } else {
2403 printk(KERN_WARNING "cciss: cmd %p "
2404 "has SCSI Status 0x%x\n",
2405 cmd, cmd->err_info->ScsiStatus);
2406 }
2407 break;
2408 case CMD_DATA_UNDERRUN:
2409 printk(KERN_WARNING "cciss: cmd %p has"
2410 " completed with data underrun "
2411 "reported\n", cmd);
2412 break;
2413 case CMD_DATA_OVERRUN:
2414 printk(KERN_WARNING "cciss: cmd %p has"
2415 " completed with data overrun "
2416 "reported\n", cmd);
2417 break;
2418 case CMD_INVALID:
2419 printk(KERN_WARNING "cciss: cmd %p is "
2420 "reported invalid\n", cmd);
2421 status = 0;
2422 break;
2423 case CMD_PROTOCOL_ERR:
2424 printk(KERN_WARNING "cciss: cmd %p has "
2425 "protocol error \n", cmd);
2426 status = 0;
2427 break;
2428 case CMD_HARDWARE_ERR:
2429 printk(KERN_WARNING "cciss: cmd %p had "
2430 " hardware error\n", cmd);
2431 status = 0;
2432 break;
2433 case CMD_CONNECTION_LOST:
2434 printk(KERN_WARNING "cciss: cmd %p had "
2435 "connection lost\n", cmd);
2436 status = 0;
2437 break;
2438 case CMD_ABORTED:
2439 printk(KERN_WARNING "cciss: cmd %p was "
2440 "aborted\n", cmd);
2441 status = 0;
2442 break;
2443 case CMD_ABORT_FAILED:
2444 printk(KERN_WARNING "cciss: cmd %p reports "
2445 "abort failed\n", cmd);
2446 status = 0;
2447 break;
2448 case CMD_UNSOLICITED_ABORT:
2449 printk(KERN_WARNING "cciss%d: unsolicited "
2450 "abort %p\n", h->ctlr, cmd);
2451 if (cmd->retry_count < MAX_CMD_RETRIES) {
2452 retry_cmd = 1;
2453 printk(KERN_WARNING
2454 "cciss%d: retrying %p\n", h->ctlr, cmd);
2455 cmd->retry_count++;
2456 } else
2457 printk(KERN_WARNING
2458 "cciss%d: %p retried too "
2459 "many times\n", h->ctlr, cmd);
2460 status = 0;
2461 break;
2462 case CMD_TIMEOUT:
2463 printk(KERN_WARNING "cciss: cmd %p timed out\n", cmd);
2464 status = 0;
2465 break;
2466 default:
2467 printk(KERN_WARNING "cciss: cmd %p returned "
2468 "unknown status %x\n", cmd,
2469 cmd->err_info->CommandStatus);
2470 status = 0;
2471 }
2472 }
2473 /* We need to return this command */
2474 if (retry_cmd) {
2475 resend_cciss_cmd(h, cmd);
2476 return;
2477 }
2478
2479 cmd->rq->completion_data = cmd;
2480 cmd->rq->errors = status;
2481 blk_add_trace_rq(cmd->rq->q, cmd->rq, BLK_TA_COMPLETE);
2482 blk_complete_request(cmd->rq);
2483}
2484
2485/*
2486 * Get a request and submit it to the controller.
2487 */
2488static void do_cciss_request(request_queue_t *q)
2489{
2490 ctlr_info_t *h = q->queuedata;
2491 CommandList_struct *c;
2492 sector_t start_blk;
2493 int seg;
2494 struct request *creq;
2495 u64bit temp64;
2496 struct scatterlist tmp_sg[MAXSGENTRIES];
2497 drive_info_struct *drv;
2498 int i, dir;
2499
2500 /* We call start_io here in case there is a command waiting on the
2501 * queue that has not been sent.
2502 */
2503 if (blk_queue_plugged(q))
2504 goto startio;
2505
2506 queue:
2507 creq = elv_next_request(q);
2508 if (!creq)
2509 goto startio;
2510
2511 BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);
2512
2513 if ((c = cmd_alloc(h, 1)) == NULL)
2514 goto full;
2515
2516 blkdev_dequeue_request(creq);
2517
2518 spin_unlock_irq(q->queue_lock);
2519
2520 c->cmd_type = CMD_RWREQ;
2521 c->rq = creq;
2522
2523 /* fill in the request */
2524 drv = creq->rq_disk->private_data;
2525 c->Header.ReplyQueue = 0; // unused in simple mode
2526 /* got command from pool, so use the command block index instead */
2527 /* for direct lookups. */
2528 /* The first 2 bits are reserved for controller error reporting. */
2529 c->Header.Tag.lower = (c->cmdindex << 3);
2530 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
2531 c->Header.LUN.LogDev.VolId = drv->LunID;
2532 c->Header.LUN.LogDev.Mode = 1;
2533 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
2534 c->Request.Type.Type = TYPE_CMD; // It is a command.
2535 c->Request.Type.Attribute = ATTR_SIMPLE;
2536 c->Request.Type.Direction =
2537 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
2538 c->Request.Timeout = 0; // Don't time out
2539 c->Request.CDB[0] =
2540 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
2541 start_blk = creq->sector;
2542#ifdef CCISS_DEBUG
2543 printk(KERN_DEBUG "cciss: sector=%d nr_sectors=%d\n", (int)creq->sector,
2544 (int)creq->nr_sectors);
2545#endif /* CCISS_DEBUG */
2546
2547 seg = blk_rq_map_sg(q, creq, tmp_sg);
2548
2549 /* get the DMA records for the setup */
2550 if (c->Request.Type.Direction == XFER_READ)
2551 dir = PCI_DMA_FROMDEVICE;
2552 else
2553 dir = PCI_DMA_TODEVICE;
2554
2555 for (i = 0; i < seg; i++) {
2556 c->SG[i].Len = tmp_sg[i].length;
2557 temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page,
2558 tmp_sg[i].offset,
2559 tmp_sg[i].length, dir);
2560 c->SG[i].Addr.lower = temp64.val32.lower;
2561 c->SG[i].Addr.upper = temp64.val32.upper;
2562 c->SG[i].Ext = 0; // we are not chaining
2563 }
2564 /* track how many SG entries we are using */
2565 if (seg > h->maxSG)
2566 h->maxSG = seg;
2567
2568#ifdef CCISS_DEBUG
2569 printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n",
2570 creq->nr_sectors, seg);
2571#endif /* CCISS_DEBUG */
2572
2573 c->Header.SGList = c->Header.SGTotal = seg;
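 /* Build a 10-byte READ/WRITE CDB for 32-bit block addresses, or a 16-byte CDB
  * when cciss_getgeometry() switched the controller to 16-byte commands for
  * volumes too large for a 32-bit LBA. */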
2574 if(h->cciss_read == CCISS_READ_10) {
2575 c->Request.CDB[1] = 0;
2576 c->Request.CDB[2] = (start_blk >> 24) & 0xff; //MSB
2577 c->Request.CDB[3] = (start_blk >> 16) & 0xff;
2578 c->Request.CDB[4] = (start_blk >> 8) & 0xff;
2579 c->Request.CDB[5] = start_blk & 0xff;
2580 c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
2581 c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
2582 c->Request.CDB[8] = creq->nr_sectors & 0xff;
2583 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
2584 } else {
2585 c->Request.CDBLen = 16;
2586 c->Request.CDB[1]= 0;
2587 c->Request.CDB[2]= (start_blk >> 56) & 0xff; //MSB
2588 c->Request.CDB[3]= (start_blk >> 48) & 0xff;
2589 c->Request.CDB[4]= (start_blk >> 40) & 0xff;
2590 c->Request.CDB[5]= (start_blk >> 32) & 0xff;
2591 c->Request.CDB[6]= (start_blk >> 24) & 0xff;
2592 c->Request.CDB[7]= (start_blk >> 16) & 0xff;
2593 c->Request.CDB[8]= (start_blk >> 8) & 0xff;
2594 c->Request.CDB[9]= start_blk & 0xff;
2595 c->Request.CDB[10]= (creq->nr_sectors >> 24) & 0xff;
2596 c->Request.CDB[11]= (creq->nr_sectors >> 16) & 0xff;
2597 c->Request.CDB[12]= (creq->nr_sectors >> 8) & 0xff;
2598 c->Request.CDB[13]= creq->nr_sectors & 0xff;
2599 c->Request.CDB[14] = c->Request.CDB[15] = 0;
2600 }
2601
2602 spin_lock_irq(q->queue_lock);
2603
2604 addQ(&(h->reqQ), c);
2605 h->Qdepth++;
2606 if (h->Qdepth > h->maxQsinceinit)
2607 h->maxQsinceinit = h->Qdepth;
2608
2609 goto queue;
2610full:
2611 blk_stop_queue(q);
2612startio:
2613 /* We already hold the driver lock here, so there is no need
2614 * to take it again.
2615 */
2616 start_io(h);
2617}
2618
2619static inline unsigned long get_next_completion(ctlr_info_t *h)
2620{
2621#ifdef CONFIG_CISS_SCSI_TAPE
2622 /* Any rejects from sendcmd() lying around? Process them first */
2623 if (h->scsi_rejects.ncompletions == 0)
2624 return h->access.command_completed(h);
2625 else {
2626 struct sendcmd_reject_list *srl;
2627 int n;
2628 srl = &h->scsi_rejects;
2629 n = --srl->ncompletions;
2630 /* printk("cciss%d: processing saved reject\n", h->ctlr); */
2631 printk("p");
2632 return srl->complete[n];
2633 }
2634#else
2635 return h->access.command_completed(h);
2636#endif
2637}
2638
2639static inline int interrupt_pending(ctlr_info_t *h)
2640{
2641#ifdef CONFIG_CISS_SCSI_TAPE
2642 return (h->access.intr_pending(h)
2643 || (h->scsi_rejects.ncompletions > 0));
2644#else
2645 return h->access.intr_pending(h);
2646#endif
2647}
2648
2649static inline long interrupt_not_for_us(ctlr_info_t *h)
2650{
2651#ifdef CONFIG_CISS_SCSI_TAPE
2652 return (((h->access.intr_pending(h) == 0) ||
2653 (h->interrupts_enabled == 0))
2654 && (h->scsi_rejects.ncompletions == 0));
2655#else
2656 return (((h->access.intr_pending(h) == 0) ||
2657 (h->interrupts_enabled == 0)));
2658#endif
2659}
2660
2661static irqreturn_t do_cciss_intr(int irq, void *dev_id)
2662{
2663 ctlr_info_t *h = dev_id;
2664 CommandList_struct *c;
2665 unsigned long flags;
2666 __u32 a, a1, a2;
2667
2668 if (interrupt_not_for_us(h))
2669 return IRQ_NONE;
2670 /*
2671 * If there are completed commands in the completion queue,
2672 * we had better do something about it.
2673 */
2674 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2675 while (interrupt_pending(h)) {
2676 while ((a = get_next_completion(h)) != FIFO_EMPTY) {
2677 a1 = a;
2678 if ((a & 0x04)) {
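 /* Bit 2 marks a direct-lookup tag built in do_cciss_request();
  * bits 3 and up hold the command pool index. */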
2679 a2 = (a >> 3);
2680 if (a2 >= h->nr_cmds) {
2681 printk(KERN_WARNING
2682 "cciss: controller cciss%d failed, stopping.\n",
2683 h->ctlr);
2684 fail_all_cmds(h->ctlr);
2685 return IRQ_HANDLED;
2686 }
2687
2688 c = h->cmd_pool + a2;
2689 a = c->busaddr;
2690
2691 } else {
2692 a &= ~3;
2693 if ((c = h->cmpQ) == NULL) {
2694 printk(KERN_WARNING
2695 "cciss: Completion of %08x ignored\n",
2696 a1);
2697 continue;
2698 }
2699 while (c->busaddr != a) {
2700 c = c->next;
2701 if (c == h->cmpQ)
2702 break;
2703 }
2704 }
2705 /*
2706 * If we've found the command, take it off the
2707 * completion Q and free it
2708 */
2709 if (c->busaddr == a) {
2710 removeQ(&h->cmpQ, c);
2711 if (c->cmd_type == CMD_RWREQ) {
2712 complete_command(h, c, 0);
2713 } else if (c->cmd_type == CMD_IOCTL_PEND) {
2714 complete(c->waiting);
2715 }
2716# ifdef CONFIG_CISS_SCSI_TAPE
2717 else if (c->cmd_type == CMD_SCSI)
2718 complete_scsi_command(c, 0, a1);
2719# endif
2720 continue;
2721 }
2722 }
2723 }
2724
2725 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2726 return IRQ_HANDLED;
2727}
2728
2729/*
2730 * We cannot read the structure directly; for portability we must use
2731 * the I/O accessor functions.
2732 * This is for debugging only.
2733 */
2734#ifdef CCISS_DEBUG
2735static void print_cfg_table(CfgTable_struct *tb)
2736{
2737 int i;
2738 char temp_name[17];
2739
2740 printk("Controller Configuration information\n");
2741 printk("------------------------------------\n");
2742 for (i = 0; i < 4; i++)
2743 temp_name[i] = readb(&(tb->Signature[i]));
2744 temp_name[4] = '\0';
2745 printk(" Signature = %s\n", temp_name);
2746 printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
2747 printk(" Transport methods supported = 0x%x\n",
2748 readl(&(tb->TransportSupport)));
2749 printk(" Transport methods active = 0x%x\n",
2750 readl(&(tb->TransportActive)));
2751 printk(" Requested transport Method = 0x%x\n",
2752 readl(&(tb->HostWrite.TransportRequest)));
2753 printk(" Coalesce Interrupt Delay = 0x%x\n",
2754 readl(&(tb->HostWrite.CoalIntDelay)));
2755 printk(" Coalesce Interrupt Count = 0x%x\n",
2756 readl(&(tb->HostWrite.CoalIntCount)));
2757 printk(" Max outstanding commands = 0x%d\n",
2758 readl(&(tb->CmdsOutMax)));
2759 printk(" Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
2760 for (i = 0; i < 16; i++)
2761 temp_name[i] = readb(&(tb->ServerName[i]));
2762 temp_name[16] = '\0';
2763 printk(" Server Name = %s\n", temp_name);
2764 printk(" Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat)));
2765}
2766#endif /* CCISS_DEBUG */
2767
2768static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
2769{
2770 int i, offset, mem_type, bar_type;
2771 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
2772 return 0;
2773 offset = 0;
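 /* Walk the resources, advancing the running config-space offset by 4 bytes for
  * I/O and 32-bit memory BARs and by 8 for 64-bit ones, until it matches the
  * requested BAR address. */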
2774 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2775 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
2776 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
2777 offset += 4;
2778 else {
2779 mem_type = pci_resource_flags(pdev, i) &
2780 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
2781 switch (mem_type) {
2782 case PCI_BASE_ADDRESS_MEM_TYPE_32:
2783 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
2784 offset += 4; /* 32 bit */
2785 break;
2786 case PCI_BASE_ADDRESS_MEM_TYPE_64:
2787 offset += 8;
2788 break;
2789 default: /* reserved in PCI 2.2 */
2790 printk(KERN_WARNING
2791 "Base address is invalid\n");
2792 return -1;
2793 break;
2794 }
2795 }
2796 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
2797 return i + 1;
2798 }
2799 return -1;
2800}
2801
2802/* If MSI/MSI-X is supported by the kernel we will try to enable it on
2803 * controllers that are capable. If not, we use IO-APIC mode.
2804 */
2805
2806static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
2807 struct pci_dev *pdev, __u32 board_id)
2808{
2809#ifdef CONFIG_PCI_MSI
2810 int err;
2811 struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1},
2812 {0, 2}, {0, 3}
2813 };
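 /* Each msix_entry pairs a vector (filled in by pci_enable_msix()) with an
  * entry index; we request four vectors, entries 0 through 3. */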
2814
2815 /* Some boards advertise MSI but don't really support it */
2816 if ((board_id == 0x40700E11) ||
2817 (board_id == 0x40800E11) ||
2818 (board_id == 0x40820E11) || (board_id == 0x40830E11))
2819 goto default_int_mode;
2820
2821 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
2822 err = pci_enable_msix(pdev, cciss_msix_entries, 4);
2823 if (!err) {
2824 c->intr[0] = cciss_msix_entries[0].vector;
2825 c->intr[1] = cciss_msix_entries[1].vector;
2826 c->intr[2] = cciss_msix_entries[2].vector;
2827 c->intr[3] = cciss_msix_entries[3].vector;
2828 c->msix_vector = 1;
2829 return;
2830 }
2831 if (err > 0) {
2832 printk(KERN_WARNING "cciss: only %d MSI-X vectors "
2833 "available\n", err);
2834 } else {
2835 printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
2836 err);
2837 }
2838 }
2839 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
2840 if (!pci_enable_msi(pdev)) {
2841 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2842 c->msi_vector = 1;
2843 return;
2844 } else {
2845 printk(KERN_WARNING "cciss: MSI init failed\n");
2846 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2847 return;
2848 }
2849 }
2850 default_int_mode:
2851#endif /* CONFIG_PCI_MSI */
2852 /* if we get here we're going to use the default interrupt mode */
2853 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2854 return;
2855}
2856
2857static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2858{
2859 ushort subsystem_vendor_id, subsystem_device_id, command;
2860 __u32 board_id, scratchpad = 0;
2861 __u64 cfg_offset;
2862 __u32 cfg_base_addr;
2863 __u64 cfg_base_addr_index;
2864 int i, err;
2865
2866 /* check to see if controller has been disabled */
2867 /* BEFORE trying to enable it */
2868 (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
2869 if (!(command & 0x02)) {
2870 printk(KERN_WARNING
2871 "cciss: controller appears to be disabled\n");
2872 return -ENODEV;
2873 }
2874
2875 err = pci_enable_device(pdev);
2876 if (err) {
2877 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
2878 return err;
2879 }
2880
2881 err = pci_request_regions(pdev, "cciss");
2882 if (err) {
2883 printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
2884 "aborting\n");
2885 goto err_out_disable_pdev;
2886 }
2887
2888 subsystem_vendor_id = pdev->subsystem_vendor;
2889 subsystem_device_id = pdev->subsystem_device;
2890 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
2891 subsystem_vendor_id);
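 /* board_id packs the PCI subsystem device ID into the upper 16 bits and the
  * subsystem vendor ID into the lower 16 (e.g. the P600 is 0x3225103C). */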
2892
2893#ifdef CCISS_DEBUG
2894 printk("command = %x\n", command);
2895 printk("irq = %x\n", pdev->irq);
2896 printk("board_id = %x\n", board_id);
2897#endif /* CCISS_DEBUG */
2898
2899/* If the kernel supports MSI/MSI-X we will try to enable that functionality,
2900 * else we use the IO-APIC interrupt assigned to us by system ROM.
2901 */
2902 cciss_interrupt_mode(c, pdev, board_id);
2903
2904 /*
2905 * Memory base addr is the first addr; the second points to the config
2906 * table.
2907 */
2908
2909 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
2910#ifdef CCISS_DEBUG
2911 printk("address 0 = %x\n", c->paddr);
2912#endif /* CCISS_DEBUG */
2913 c->vaddr = remap_pci_mem(c->paddr, 200);
2914
2915 /* Wait for the board to become ready. (PCI hotplug needs this.)
2916 * We poll for up to 120 secs, once per 100ms. */
2917 for (i = 0; i < 1200; i++) {
2918 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
2919 if (scratchpad == CCISS_FIRMWARE_READY)
2920 break;
2921 set_current_state(TASK_INTERRUPTIBLE);
2922 schedule_timeout(HZ / 10); /* wait 100ms */
2923 }
2924 if (scratchpad != CCISS_FIRMWARE_READY) {
2925 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
2926 err = -ENODEV;
2927 goto err_out_free_res;
2928 }
2929
2930 /* get the address index number */
2931 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
2932 cfg_base_addr &= (__u32) 0x0000ffff;
2933#ifdef CCISS_DEBUG
2934 printk("cfg base address = %x\n", cfg_base_addr);
2935#endif /* CCISS_DEBUG */
2936 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
2937#ifdef CCISS_DEBUG
2938 printk("cfg base address index = %x\n", cfg_base_addr_index);
2939#endif /* CCISS_DEBUG */
2940 if (cfg_base_addr_index == -1) {
2941 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
2942 err = -ENODEV;
2943 goto err_out_free_res;
2944 }
2945
2946 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
2947#ifdef CCISS_DEBUG
2948 printk("cfg offset = %x\n", cfg_offset);
2949#endif /* CCISS_DEBUG */
2950 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
2951 cfg_base_addr_index) +
2952 cfg_offset, sizeof(CfgTable_struct));
2953 c->board_id = board_id;
2954
2955#ifdef CCISS_DEBUG
2956 print_cfg_table(c->cfgtable);
2957#endif /* CCISS_DEBUG */
2958
2959 for (i = 0; i < ARRAY_SIZE(products); i++) {
2960 if (board_id == products[i].board_id) {
2961 c->product_name = products[i].product_name;
2962 c->access = *(products[i].access);
2963 c->nr_cmds = products[i].nr_cmds;
2964 break;
2965 }
2966 }
2967 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
2968 (readb(&c->cfgtable->Signature[1]) != 'I') ||
2969 (readb(&c->cfgtable->Signature[2]) != 'S') ||
2970 (readb(&c->cfgtable->Signature[3]) != 'S')) {
2971 printk("Does not appear to be a valid CISS config table\n");
2972 err = -ENODEV;
2973 goto err_out_free_res;
2974 }
2975 /* We didn't find the controller in our list. We know the
2976 * signature is valid. If it's an HP device let's try to
2977 * bind to the device and fire it up. Otherwise we bail.
2978 */
2979 if (i == ARRAY_SIZE(products)) {
2980 if (subsystem_vendor_id == PCI_VENDOR_ID_HP) {
2981 c->product_name = products[i-1].product_name;
2982 c->access = *(products[i-1].access);
2983 c->nr_cmds = products[i-1].nr_cmds;
2984 printk(KERN_WARNING "cciss: This is an unknown "
2985 "Smart Array controller.\n"
2986 "cciss: Please update to the latest driver "
2987 "available from www.hp.com.\n");
2988 } else {
2989 printk(KERN_WARNING "cciss: Sorry, I don't know how"
2990 " to access the Smart Array controller %08lx\n"
2991 , (unsigned long)board_id);
2992 err = -ENODEV;
2993 goto err_out_free_res;
2994 }
2995 }
2996#ifdef CONFIG_X86
2997 {
2998 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
2999 __u32 prefetch;
3000 prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
3001 prefetch |= 0x100;
3002 writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
3003 }
3004#endif
3005
3006 /* Disabling DMA prefetch for the P600
3007 * An ASIC bug may result in a prefetch beyond
3008 * physical memory.
3009 */
3010 if(board_id == 0x3225103C) {
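 /* Setting bit 0x8000 in I2O_DMA1_CFG turns the prefetcher off,
  * per the ASIC errata described above. */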
3011 __u32 dma_prefetch;
3012 dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG);
3013 dma_prefetch |= 0x8000;
3014 writel(dma_prefetch, c->vaddr + I2O_DMA1_CFG);
3015 }
3016
3017#ifdef CCISS_DEBUG
3018 printk("Trying to put board into Simple mode\n");
3019#endif /* CCISS_DEBUG */
3020 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
3021 /* Update the field, and then ring the doorbell */
3022 writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest));
3023 writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
3024
3025 /* under certain very rare conditions, this can take a while.
3026 * (e.g. hot replacing a failed 144GB drive in a RAID 5 set right
3027 * as we enter this code.) */
3028 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
3029 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
3030 break;
3031 /* delay and try again */
3032 set_current_state(TASK_INTERRUPTIBLE);
3033 schedule_timeout(10);
3034 }
3035
3036#ifdef CCISS_DEBUG
3037 printk(KERN_DEBUG "I counter got to %d %x\n", i,
3038 readl(c->vaddr + SA5_DOORBELL));
3039#endif /* CCISS_DEBUG */
3040#ifdef CCISS_DEBUG
3041 print_cfg_table(c->cfgtable);
3042#endif /* CCISS_DEBUG */
3043
3044 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3045 printk(KERN_WARNING "cciss: unable to get board into"
3046 " simple mode\n");
3047 err = -ENODEV;
3048 goto err_out_free_res;
3049 }
3050 return 0;
3051
3052 err_out_free_res:
3053 pci_release_regions(pdev);
3054
3055 err_out_disable_pdev:
3056 pci_disable_device(pdev);
3057 return err;
3058}
3059
3060/*
3061 * Gets information about the local volumes attached to the controller.
3062 */
3063static void cciss_getgeometry(int cntl_num)
3064{
3065 ReportLunData_struct *ld_buff;
3066 InquiryData_struct *inq_buff;
3067 int return_code;
3068 int i;
3069 int listlength = 0;
3070 __u32 lunid = 0;
3071 int block_size;
3072 sector_t total_size;
3073
3074 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
3075 if (ld_buff == NULL) {
3076 printk(KERN_ERR "cciss: out of memory\n");
3077 return;
3078 }
3079 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
3080 if (inq_buff == NULL) {
3081 printk(KERN_ERR "cciss: out of memory\n");
3082 kfree(ld_buff);
3083 return;
3084 }
3085 /* Get the firmware version */
3086 return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
3087 sizeof(InquiryData_struct), 0, 0, 0, NULL,
3088 TYPE_CMD);
3089 if (return_code == IO_OK) {
3090 hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
3091 hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
3092 hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
3093 hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
3094 } else { /* send command failed */
3095
3096 printk(KERN_WARNING "cciss: unable to determine firmware"
3097 " version of controller\n");
3098 }
3099 /* Get the number of logical volumes */
3100 return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
3101 sizeof(ReportLunData_struct), 0, 0, 0, NULL,
3102 TYPE_CMD);
3103
3104 if (return_code == IO_OK) {
3105#ifdef CCISS_DEBUG
3106 printk("LUN Data\n--------------------------\n");
3107#endif /* CCISS_DEBUG */
3108
3109 listlength |=
3110 (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
3111 listlength |=
3112 (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
3113 listlength |=
3114 (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
3115 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
3116 } else { /* reading number of logical volumes failed */
3117
3118 printk(KERN_WARNING "cciss: report logical volume"
3119 " command failed\n");
3120 listlength = 0;
3121 }
3122 hba[cntl_num]->num_luns = listlength / 8; // 8 bytes per entry
3123 if (hba[cntl_num]->num_luns > CISS_MAX_LUN) {
3124 printk(KERN_ERR
3125 "ciss: only %d number of logical volumes supported\n",
3126 CISS_MAX_LUN);
3127 hba[cntl_num]->num_luns = CISS_MAX_LUN;
3128 }
3129#ifdef CCISS_DEBUG
3130 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n",
3131 ld_buff->LUNListLength[0], ld_buff->LUNListLength[1],
3132 ld_buff->LUNListLength[2], ld_buff->LUNListLength[3],
3133 hba[cntl_num]->num_luns);
3134#endif /* CCISS_DEBUG */
3135
3136 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns - 1;
3137 for (i = 0; i < CISS_MAX_LUN; i++) {
3138 if (i < hba[cntl_num]->num_luns) {
3139 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3]))
3140 << 24;
3141 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2]))
3142 << 16;
3143 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1]))
3144 << 8;
3145 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
3146
3147 hba[cntl_num]->drv[i].LunID = lunid;
3148
3149#ifdef CCISS_DEBUG
3150 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
3151 ld_buff->LUN[i][0], ld_buff->LUN[i][1],
3152 ld_buff->LUN[i][2], ld_buff->LUN[i][3],
3153 hba[cntl_num]->drv[i].LunID);
3154#endif /* CCISS_DEBUG */
3155
3156 /* testing to see if 16-byte CDBs are already being used */
3157 if(hba[cntl_num]->cciss_read == CCISS_READ_16) {
3158 cciss_read_capacity_16(cntl_num, i, 0,
3159 &total_size, &block_size);
3160 goto geo_inq;
3161 }
3162 cciss_read_capacity(cntl_num, i, 0, &total_size, &block_size);
3163
3164 /* total_size = last LBA + 1 */
3165 if(total_size == (__u32) 0) {
3166 cciss_read_capacity_16(cntl_num, i, 0,
3167 &total_size, &block_size);
3168 hba[cntl_num]->cciss_read = CCISS_READ_16;
3169 hba[cntl_num]->cciss_write = CCISS_WRITE_16;
3170 } else {
3171 hba[cntl_num]->cciss_read = CCISS_READ_10;
3172 hba[cntl_num]->cciss_write = CCISS_WRITE_10;
3173 }
3174geo_inq:
3175 cciss_geometry_inquiry(cntl_num, i, 0, total_size,
3176 block_size, inq_buff,
3177 &hba[cntl_num]->drv[i]);
3178 } else {
3179 /* initialize raid_level to indicate a free space */
3180 hba[cntl_num]->drv[i].raid_level = -1;
3181 }
3182 }
3183 kfree(ld_buff);
3184 kfree(inq_buff);
3185}
3186
3187/* Function to find the first free pointer into our hba[] array */
3188/* Returns -1 if no free entries are left. */
3189static int alloc_cciss_hba(void)
3190{
3191 struct gendisk *disk[NWD];
3192 int i, n;
3193 for (n = 0; n < NWD; n++) {
3194 disk[n] = alloc_disk(1 << NWD_SHIFT);
3195 if (!disk[n])
3196 goto out;
3197 }
3198
3199 for (i = 0; i < MAX_CTLR; i++) {
3200 if (!hba[i]) {
3201 ctlr_info_t *p;
3202 p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
3203 if (!p)
3204 goto Enomem;
3205 for (n = 0; n < NWD; n++)
3206 p->gendisk[n] = disk[n];
3207 hba[i] = p;
3208 return i;
3209 }
3210 }
3211 printk(KERN_WARNING "cciss: This driver supports a maximum"
3212 " of %d controllers.\n", MAX_CTLR);
3213 goto out;
3214 Enomem:
3215 printk(KERN_ERR "cciss: out of memory.\n");
3216 out:
3217 while (n--)
3218 put_disk(disk[n]);
3219 return -1;
3220}
3221
3222static void free_hba(int i)
3223{
3224 ctlr_info_t *p = hba[i];
3225 int n;
3226
3227 hba[i] = NULL;
3228 for (n = 0; n < NWD; n++)
3229 put_disk(p->gendisk[n]);
3230 kfree(p);
3231}
3232
3233/*
3234 * This is it. Find all the controllers and register them. I really hate
3235 * stealing all these major device numbers.
3236 * Returns 1 on success, -1 on failure.
3237 */
3238static int __devinit cciss_init_one(struct pci_dev *pdev,
3239 const struct pci_device_id *ent)
3240{
3241 request_queue_t *q;
3242 int i;
3243 int j;
3244 int rc;
3245 int dac;
3246
3247 i = alloc_cciss_hba();
3248 if (i < 0)
3249 return -1;
3250
3251 hba[i]->busy_initializing = 1;
3252
3253 if (cciss_pci_init(hba[i], pdev) != 0)
3254 goto clean1;
3255
3256 sprintf(hba[i]->devname, "cciss%d", i);
3257 hba[i]->ctlr = i;
3258 hba[i]->pdev = pdev;
3259
3260 /* configure PCI DMA stuff */
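 /* Prefer a 64-bit DMA mask (DAC addressing); fall back to a 32-bit mask,
  * and bail out if neither can be set. */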
3261 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
3262 dac = 1;
3263 else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
3264 dac = 0;
3265 else {
3266 printk(KERN_ERR "cciss: no suitable DMA available\n");
3267 goto clean1;
3268 }
3269
3270 /*
3271 * register with the major number, or get a dynamic major number
3272 * by passing 0 as argument. This is done for greater than
3273 * 8 controller support.
3274 */
3275 if (i < MAX_CTLR_ORIG)
3276 hba[i]->major = COMPAQ_CISS_MAJOR + i;
3277 rc = register_blkdev(hba[i]->major, hba[i]->devname);
3278 if (rc == -EBUSY || rc == -EINVAL) {
3279 printk(KERN_ERR
3280 "cciss: Unable to get major number %d for %s "
3281 "on hba %d\n", hba[i]->major, hba[i]->devname, i);
3282 goto clean1;
3283 } else {
3284 if (i >= MAX_CTLR_ORIG)
3285 hba[i]->major = rc;
3286 }
3287
3288 /* make sure the board interrupts are off */
3289 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
3290 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
3291 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
3292 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
3293 hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
3294 goto clean2;
3295 }
3296
3297 printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
3298 hba[i]->devname, pdev->device, pci_name(pdev),
3299 hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
3300
3301 hba[i]->cmd_pool_bits =
3302 kmalloc(((hba[i]->nr_cmds + BITS_PER_LONG -
3303 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
3304 hba[i]->cmd_pool = (CommandList_struct *)
3305 pci_alloc_consistent(hba[i]->pdev,
3306 hba[i]->nr_cmds * sizeof(CommandList_struct),
3307 &(hba[i]->cmd_pool_dhandle));
3308 hba[i]->errinfo_pool = (ErrorInfo_struct *)
3309 pci_alloc_consistent(hba[i]->pdev,
3310 hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3311 &(hba[i]->errinfo_pool_dhandle));
3312 if ((hba[i]->cmd_pool_bits == NULL)
3313 || (hba[i]->cmd_pool == NULL)
3314 || (hba[i]->errinfo_pool == NULL)) {
3315 printk(KERN_ERR "cciss: out of memory\n");
3316 goto clean4;
3317 }
3318#ifdef CONFIG_CISS_SCSI_TAPE
3319 hba[i]->scsi_rejects.complete =
3320 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
3321 (hba[i]->nr_cmds + 5), GFP_KERNEL);
3322 if (hba[i]->scsi_rejects.complete == NULL) {
3323 printk(KERN_ERR "cciss: out of memory\n");
3324 goto clean4;
3325 }
3326#endif
3327 spin_lock_init(&hba[i]->lock);
3328
3329 /* Initialize the pdev driver private data.
3330 have it point to hba[i]. */
3331 pci_set_drvdata(pdev, hba[i]);
3332 /* command and error info recs zeroed out before
3333 they are used */
3334 memset(hba[i]->cmd_pool_bits, 0,
3335 ((hba[i]->nr_cmds + BITS_PER_LONG -
3336 1) / BITS_PER_LONG) * sizeof(unsigned long));
3337
3338#ifdef CCISS_DEBUG
3339 printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n", i);
3340#endif /* CCISS_DEBUG */
3341
3342 cciss_getgeometry(i);
3343
3344 cciss_scsi_setup(i);
3345
3346 /* Turn the interrupts on so we can service requests */
3347 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
3348
3349 cciss_procinit(i);
3350 hba[i]->busy_initializing = 0;
3351
3352 for (j = 0; j < NWD; j++) { /* mfm */
3353 drive_info_struct *drv = &(hba[i]->drv[j]);
3354 struct gendisk *disk = hba[i]->gendisk[j];
3355
3356 q = blk_init_queue(do_cciss_request, &hba[i]->lock);
3357 if (!q) {
3358 printk(KERN_ERR
3359 "cciss: unable to allocate queue for disk %d\n",
3360 j);
3361 break;
3362 }
3363 drv->queue = q;
3364
3365 q->backing_dev_info.ra_pages = READ_AHEAD;
3366 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
3367
3368 /* This is a hardware imposed limit. */
3369 blk_queue_max_hw_segments(q, MAXSGENTRIES);
3370
3371 /* This is a limit in the driver and could be eliminated. */
3372 blk_queue_max_phys_segments(q, MAXSGENTRIES);
3373
3374 blk_queue_max_sectors(q, 512);
3375
3376 blk_queue_softirq_done(q, cciss_softirq_done);
3377
3378 q->queuedata = hba[i];
3379 sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
3380 disk->major = hba[i]->major;
3381 disk->first_minor = j << NWD_SHIFT;
3382 disk->fops = &cciss_fops;
3383 disk->queue = q;
3384 disk->private_data = drv;
3385 disk->driverfs_dev = &pdev->dev;
3386 /* we must register the controller even if no disks exist */
3387 /* this is for the online array utilities */
3388 if (!drv->heads && j)
3389 continue;
3390 blk_queue_hardsect_size(q, drv->block_size);
3391 set_capacity(disk, drv->nr_blocks);
3392 add_disk(disk);
3393 }
3394
3395 return 1;
3396
3397 clean4:
3398#ifdef CONFIG_CISS_SCSI_TAPE
3399 kfree(hba[i]->scsi_rejects.complete);
3400#endif
3401 kfree(hba[i]->cmd_pool_bits);
3402 if (hba[i]->cmd_pool)
3403 pci_free_consistent(hba[i]->pdev,
3404 hba[i]->nr_cmds * sizeof(CommandList_struct),
3405 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3406 if (hba[i]->errinfo_pool)
3407 pci_free_consistent(hba[i]->pdev,
3408 hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3409 hba[i]->errinfo_pool,
3410 hba[i]->errinfo_pool_dhandle);
3411 free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
3412 clean2:
3413 unregister_blkdev(hba[i]->major, hba[i]->devname);
3414 clean1:
3415 hba[i]->busy_initializing = 0;
3416 free_hba(i);
3417 return -1;
3418}
3419
3420static void __devexit cciss_remove_one(struct pci_dev *pdev)
3421{
3422 ctlr_info_t *tmp_ptr;
3423 int i, j;
3424 char flush_buf[4];
3425 int return_code;
3426
3427 if (pci_get_drvdata(pdev) == NULL) {
3428 printk(KERN_ERR "cciss: Unable to remove device\n");
3429 return;
3430 }
3431 tmp_ptr = pci_get_drvdata(pdev);
3432 i = tmp_ptr->ctlr;
3433 if (hba[i] == NULL) {
3434 printk(KERN_ERR "cciss: device appears to "
3435 "already be removed \n");
3436 return;
3437 }
3438 /* Turn board interrupts off and send the flush cache command */
3439 /* sendcmd will turn off interrupts and send the flush
3440 * to write all data in the battery-backed cache out to disk */
3441 memset(flush_buf, 0, 4);
3442 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
3443 TYPE_CMD);
3444 if (return_code != IO_OK) {
3445 printk(KERN_WARNING "cciss: Error flushing cache on controller %d\n",
3446 i);
3447 }
3448 free_irq(hba[i]->intr[2], hba[i]);
3449
3450#ifdef CONFIG_PCI_MSI
3451 if (hba[i]->msix_vector)
3452 pci_disable_msix(hba[i]->pdev);
3453 else if (hba[i]->msi_vector)
3454 pci_disable_msi(hba[i]->pdev);
3455#endif /* CONFIG_PCI_MSI */
3456
3457 iounmap(hba[i]->vaddr);
3458 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3459 unregister_blkdev(hba[i]->major, hba[i]->devname);
3460 remove_proc_entry(hba[i]->devname, proc_cciss);
3461
3462 /* remove it from the disk list */
3463 for (j = 0; j < NWD; j++) {
3464 struct gendisk *disk = hba[i]->gendisk[j];
3465 if (disk) {
3466 request_queue_t *q = disk->queue;
3467
3468 if (disk->flags & GENHD_FL_UP)
3469 del_gendisk(disk);
3470 if (q)
3471 blk_cleanup_queue(q);
3472 }
3473 }
3474
3475 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct),
3476 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3477 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3478 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3479 kfree(hba[i]->cmd_pool_bits);
3480#ifdef CONFIG_CISS_SCSI_TAPE
3481 kfree(hba[i]->scsi_rejects.complete);
3482#endif
3483 pci_release_regions(pdev);
3484 pci_disable_device(pdev);
3485 pci_set_drvdata(pdev, NULL);
3486 free_hba(i);
3487}
3488
3489static struct pci_driver cciss_pci_driver = {
3490 .name = "cciss",
3491 .probe = cciss_init_one,
3492 .remove = __devexit_p(cciss_remove_one),
3493 .id_table = cciss_pci_device_id, /* id_table */
3494};
3495
3496/*
3497 * This is it. Register the PCI driver information for the cards we control;
3498 * the OS will call our registered routines when it finds one of our cards.
3499 */
3500static int __init cciss_init(void)
3501{
3502 printk(KERN_INFO DRIVER_NAME "\n");
3503
3504 /* Register for our PCI devices */
3505 return pci_register_driver(&cciss_pci_driver);
3506}
3507
3508static void __exit cciss_cleanup(void)
3509{
3510 int i;
3511
3512 pci_unregister_driver(&cciss_pci_driver);
3513 /* double check that all controller entries have been removed */
3514 for (i = 0; i < MAX_CTLR; i++) {
3515 if (hba[i] != NULL) {
3516 printk(KERN_WARNING "cciss: had to remove"
3517 " controller %d\n", i);
3518 cciss_remove_one(hba[i]->pdev);
3519 }
3520 }
3521 remove_proc_entry("cciss", proc_root_driver);
3522}
3523
3524static void fail_all_cmds(unsigned long ctlr)
3525{
3526 /* If we get here, the board is apparently dead. */
3527 ctlr_info_t *h = hba[ctlr];
3528 CommandList_struct *c;
3529 unsigned long flags;
3530
3531 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
3532 h->alive = 0; /* the controller apparently died... */
3533
3534 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
3535
3536 pci_disable_device(h->pdev); /* Make sure it is really dead. */
3537
3538 /* move everything off the request queue onto the completed queue */
3539 while ((c = h->reqQ) != NULL) {
3540 removeQ(&(h->reqQ), c);
3541 h->Qdepth--;
3542 addQ(&(h->cmpQ), c);
3543 }
3544
3545 /* Now, fail everything on the completed queue with a HW error */
3546 while ((c = h->cmpQ) != NULL) {
3547 removeQ(&h->cmpQ, c);
3548 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
3549 if (c->cmd_type == CMD_RWREQ) {
3550 complete_command(h, c, 0);
3551 } else if (c->cmd_type == CMD_IOCTL_PEND)
3552 complete(c->waiting);
3553#ifdef CONFIG_CISS_SCSI_TAPE
3554 else if (c->cmd_type == CMD_SCSI)
3555 complete_scsi_command(c, 0, 0);
3556#endif
3557 }
3558 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
3559 return;
3560}
3561
3562module_init(cciss_init);
3563module_exit(cciss_cleanup);