cciss: add SG_IO ioctl to cciss
drivers/block/cciss.c
1/*
2 * Disk Array driver for HP SA 5xxx and 6xxx Controllers
3 * Copyright 2000, 2006 Hewlett-Packard Development Company, L.P.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
20 *
21 */
22
23#include <linux/module.h>
24#include <linux/interrupt.h>
25#include <linux/types.h>
26#include <linux/pci.h>
27#include <linux/kernel.h>
28#include <linux/slab.h>
29#include <linux/delay.h>
30#include <linux/major.h>
31#include <linux/fs.h>
32#include <linux/bio.h>
33#include <linux/blkpg.h>
34#include <linux/timer.h>
35#include <linux/proc_fs.h>
36#include <linux/init.h>
37#include <linux/hdreg.h>
38#include <linux/spinlock.h>
39#include <linux/compat.h>
40#include <linux/blktrace_api.h>
41#include <asm/uaccess.h>
42#include <asm/io.h>
43
44#include <linux/dma-mapping.h>
45#include <linux/blkdev.h>
46#include <linux/genhd.h>
47#include <linux/completion.h>
48#include <scsi/sg.h>
49#include <scsi/scsi_ioctl.h>
50#include <linux/cdrom.h>
51
52#define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
53#define DRIVER_NAME "HP CISS Driver (v 3.6.14)"
54#define DRIVER_VERSION CCISS_DRIVER_VERSION(3,6,14)
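/* e.g. CCISS_DRIVER_VERSION(3,6,14) packs to (3<<16)|(6<<8)|14 == 0x3060E */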
55
56/* Embedded module documentation macros - see modules.h */
57MODULE_AUTHOR("Hewlett-Packard Company");
58MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 3.6.14");
59MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
60 " SA6i P600 P800 P400 P400i E200 E200i E500");
61MODULE_VERSION("3.6.14");
62MODULE_LICENSE("GPL");
63
64#include "cciss_cmd.h"
65#include "cciss.h"
66#include <linux/cciss_ioctl.h>
67
68/* define the PCI info for the cards we can control */
69static const struct pci_device_id cciss_pci_device_id[] = {
70 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070},
71 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
72 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
73 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083},
74 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091},
75 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A},
76 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B},
77 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C},
78 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D},
79 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225},
80 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
81 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
82 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235},
83 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211},
84 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212},
85 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213},
86 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214},
87 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
88 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3237},
89 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
90 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
91 {0,}
92};
93
94MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
95
96/* board_id = Subsystem Device ID & Vendor ID
97 * product = Marketing Name for the board
98 * access = Address of the struct of function pointers
99 * nr_cmds = Number of commands supported by controller
100 */
101static struct board_type products[] = {
102 {0x40700E11, "Smart Array 5300", &SA5_access, 512},
103 {0x40800E11, "Smart Array 5i", &SA5B_access, 512},
104 {0x40820E11, "Smart Array 532", &SA5B_access, 512},
105 {0x40830E11, "Smart Array 5312", &SA5B_access, 512},
106 {0x409A0E11, "Smart Array 641", &SA5_access, 512},
107 {0x409B0E11, "Smart Array 642", &SA5_access, 512},
108 {0x409C0E11, "Smart Array 6400", &SA5_access, 512},
109 {0x409D0E11, "Smart Array 6400 EM", &SA5_access, 512},
110 {0x40910E11, "Smart Array 6i", &SA5_access, 512},
111 {0x3225103C, "Smart Array P600", &SA5_access, 512},
112 {0x3223103C, "Smart Array P800", &SA5_access, 512},
113 {0x3234103C, "Smart Array P400", &SA5_access, 512},
114 {0x3235103C, "Smart Array P400i", &SA5_access, 512},
115 {0x3211103C, "Smart Array E200i", &SA5_access, 120},
116 {0x3212103C, "Smart Array E200", &SA5_access, 120},
117 {0x3213103C, "Smart Array E200i", &SA5_access, 120},
118 {0x3214103C, "Smart Array E200i", &SA5_access, 120},
119 {0x3215103C, "Smart Array E200i", &SA5_access, 120},
120 {0x3237103C, "Smart Array E500", &SA5_access, 512},
121 {0xFFFF103C, "Unknown Smart Array", &SA5_access, 120},
122};
123
124/* How long to wait (in milliseconds) for board to go into simple mode */
125#define MAX_CONFIG_WAIT 30000
126#define MAX_IOCTL_CONFIG_WAIT 1000
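/* e.g. the ioctl doorbell-wait loops below spin for at most
 * MAX_IOCTL_CONFIG_WAIT iterations of udelay(1000), i.e. roughly one second.
 */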
127
129/* define how many times we will try a command because of bus resets */
129#define MAX_CMD_RETRIES 3
130
131#define READ_AHEAD 1024
132#define MAX_CTLR 32
133
134/* Originally the cciss driver supported only 8 major numbers */
135#define MAX_CTLR_ORIG 8
136
137static ctlr_info_t *hba[MAX_CTLR];
138
139static void do_cciss_request(request_queue_t *q);
140static irqreturn_t do_cciss_intr(int irq, void *dev_id);
141static int cciss_open(struct inode *inode, struct file *filep);
142static int cciss_release(struct inode *inode, struct file *filep);
143static int cciss_ioctl(struct inode *inode, struct file *filep,
144 unsigned int cmd, unsigned long arg);
145static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
146
147static int cciss_revalidate(struct gendisk *disk);
148static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
149static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
150 int clear_all);
151
152static void cciss_read_capacity(int ctlr, int logvol, int withirq,
153 sector_t *total_size, unsigned int *block_size);
154static void cciss_read_capacity_16(int ctlr, int logvol, int withirq,
155 sector_t *total_size, unsigned int *block_size);
156static void cciss_geometry_inquiry(int ctlr, int logvol,
157 int withirq, sector_t total_size,
158 unsigned int block_size, InquiryData_struct *inq_buff,
159 drive_info_struct *drv);
160static void cciss_getgeometry(int cntl_num);
161static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
162 __u32);
163static void start_io(ctlr_info_t *h);
164static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
165 unsigned int use_unit_num, unsigned int log_unit,
166 __u8 page_code, unsigned char *scsi3addr, int cmd_type);
167static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
168 unsigned int use_unit_num, unsigned int log_unit,
169 __u8 page_code, int cmd_type);
170
171static void fail_all_cmds(unsigned long ctlr);
172
173#ifdef CONFIG_PROC_FS
174static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
175 int length, int *eof, void *data);
176static void cciss_procinit(int i);
177#else
178static void cciss_procinit(int i)
179{
180}
181#endif /* CONFIG_PROC_FS */
182
183#ifdef CONFIG_COMPAT
184static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
185#endif
186
187static struct block_device_operations cciss_fops = {
188 .owner = THIS_MODULE,
189 .open = cciss_open,
190 .release = cciss_release,
191 .ioctl = cciss_ioctl,
192 .getgeo = cciss_getgeo,
193#ifdef CONFIG_COMPAT
194 .compat_ioctl = cciss_compat_ioctl,
195#endif
196 .revalidate_disk = cciss_revalidate,
197};
198
199/*
200 * Enqueuing and dequeuing functions for cmdlists.
201 */
202static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
203{
204 if (*Qptr == NULL) {
205 *Qptr = c;
206 c->next = c->prev = c;
207 } else {
208 c->prev = (*Qptr)->prev;
209 c->next = (*Qptr);
210 (*Qptr)->prev->next = c;
211 (*Qptr)->prev = c;
212 }
213}
214
215static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
216 CommandList_struct *c)
217{
218 if (c && c->next != c) {
219 if (*Qptr == c)
220 *Qptr = c->next;
221 c->prev->next = c->next;
222 c->next->prev = c->prev;
223 } else {
224 *Qptr = NULL;
225 }
226 return c;
227}
228
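/*
 * Illustrative sketch (not part of the driver): addQ()/removeQ() above keep a
 * circular doubly-linked list with *Qptr pointing at the head.  The usual
 * caller pattern for the request queue, seen later in this file, is shown by
 * this hypothetical helper (the caller holds CCISS_LOCK):
 */
#if 0 /* example only */
static void example_queue_cmd(ctlr_info_t *h, CommandList_struct *c)
{
        addQ(&h->reqQ, c);      /* link c into the controller's request queue */
        h->Qdepth++;            /* bookkeeping reported in /proc and used by start_io() */
        start_io(h);            /* submit queued commands to the hardware */
}
#endif
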
229#include "cciss_scsi.c" /* For SCSI tape support */
230
231#define RAID_UNKNOWN 6
232
233#ifdef CONFIG_PROC_FS
234
235/*
236 * Report information about this controller.
237 */
238#define ENG_GIG 1000000000
239#define ENG_GIG_FACTOR (ENG_GIG/512)
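/* ENG_GIG_FACTOR is 1,000,000,000/512 = 1,953,125 sectors per decimal GB;
 * e.g. a volume of 286,749,488 512-byte sectors is reported below as 146.81GB.
 */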
240static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
241 "UNKNOWN"
242};
243
244static struct proc_dir_entry *proc_cciss;
245
246static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
247 int length, int *eof, void *data)
248{
249 off_t pos = 0;
250 off_t len = 0;
251 int size, i, ctlr;
252 ctlr_info_t *h = (ctlr_info_t *) data;
253 drive_info_struct *drv;
254 unsigned long flags;
255 sector_t vol_sz, vol_sz_frac;
256
257 ctlr = h->ctlr;
258
259 /* prevent displaying bogus info during configuration
260 * or deconfiguration of a logical volume
261 */
262 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
263 if (h->busy_configuring) {
264 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
265 return -EBUSY;
266 }
267 h->busy_configuring = 1;
268 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
269
270 size = sprintf(buffer, "%s: HP %s Controller\n"
271 "Board ID: 0x%08lx\n"
272 "Firmware Version: %c%c%c%c\n"
273 "IRQ: %d\n"
274 "Logical drives: %d\n"
275 "Max sectors: %d\n"
276 "Current Q depth: %d\n"
277 "Current # commands on controller: %d\n"
278 "Max Q depth since init: %d\n"
279 "Max # commands on controller since init: %d\n"
280 "Max SG entries since init: %d\n\n",
281 h->devname,
282 h->product_name,
283 (unsigned long)h->board_id,
284 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
285 h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
286 h->num_luns,
287 h->cciss_max_sectors,
288 h->Qdepth, h->commands_outstanding,
289 h->maxQsinceinit, h->max_outstanding, h->maxSG);
290
291 pos += size;
292 len += size;
293 cciss_proc_tape_report(ctlr, buffer, &pos, &len);
294 for (i = 0; i <= h->highest_lun; i++) {
295
296 drv = &h->drv[i];
297 if (drv->heads == 0)
298 continue;
299
300 vol_sz = drv->nr_blocks;
301 vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
302 vol_sz_frac *= 100;
303 sector_div(vol_sz_frac, ENG_GIG_FACTOR);
304
305 if (drv->raid_level > 5)
306 drv->raid_level = RAID_UNKNOWN;
307 size = sprintf(buffer + len, "cciss/c%dd%d:"
308 "\t%4u.%02uGB\tRAID %s\n",
309 ctlr, i, (int)vol_sz, (int)vol_sz_frac,
310 raid_label[drv->raid_level]);
311 pos += size;
312 len += size;
313 }
314
315 *eof = 1;
316 *start = buffer + offset;
317 len -= offset;
318 if (len > length)
319 len = length;
320 h->busy_configuring = 0;
321 return len;
322}
323
324static int
325cciss_proc_write(struct file *file, const char __user *buffer,
326 unsigned long count, void *data)
327{
328 unsigned char cmd[80];
329 int len;
330#ifdef CONFIG_CISS_SCSI_TAPE
331 ctlr_info_t *h = (ctlr_info_t *) data;
332 int rc;
333#endif
334
335 if (count > sizeof(cmd) - 1)
336 return -EINVAL;
337 if (copy_from_user(cmd, buffer, count))
338 return -EFAULT;
339 cmd[count] = '\0';
340 len = strlen(cmd); // above 3 lines ensure safety
341 if (len && cmd[len - 1] == '\n')
342 cmd[--len] = '\0';
343# ifdef CONFIG_CISS_SCSI_TAPE
344 if (strcmp("engage scsi", cmd) == 0) {
345 rc = cciss_engage_scsi(h->ctlr);
346 if (rc != 0)
347 return -rc;
348 return count;
349 }
350 /* might be nice to have "disengage" too, but it's not
351 safely possible. (only 1 module use count, lock issues.) */
352# endif
353 return -EINVAL;
354}
355
356/*
357 * Get us a file in /proc/cciss that says something about each controller.
358 * Create /proc/cciss if it doesn't exist yet.
359 */
360static void __devinit cciss_procinit(int i)
361{
362 struct proc_dir_entry *pde;
363
364 if (proc_cciss == NULL) {
365 proc_cciss = proc_mkdir("cciss", proc_root_driver);
366 if (!proc_cciss)
367 return;
368 }
369
370 pde = create_proc_read_entry(hba[i]->devname,
371 S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
372 proc_cciss, cciss_proc_get_info, hba[i]);
373 pde->write_proc = cciss_proc_write;
374}
375#endif /* CONFIG_PROC_FS */
376
377/*
378 * For operations that cannot sleep, a command block is allocated at init,
379 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
380 * which ones are free or in use. For operations that can wait for kmalloc
381 * to possibly sleep, this routine can be called with get_from_pool set to 0.
382 * cmd_free() MUST be called with got_from_pool set to 0 if cmd_alloc was.
383 */
384static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
385{
386 CommandList_struct *c;
387 int i;
388 u64bit temp64;
389 dma_addr_t cmd_dma_handle, err_dma_handle;
390
391 if (!get_from_pool) {
392 c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
393 sizeof(CommandList_struct), &cmd_dma_handle);
394 if (c == NULL)
395 return NULL;
396 memset(c, 0, sizeof(CommandList_struct));
397
398 c->cmdindex = -1;
399
400 c->err_info = (ErrorInfo_struct *)
401 pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
402 &err_dma_handle);
403
404 if (c->err_info == NULL) {
405 pci_free_consistent(h->pdev,
406 sizeof(CommandList_struct), c, cmd_dma_handle);
407 return NULL;
408 }
409 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
410 } else { /* get it out of the controller's pool */
411
412 do {
413 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
414 if (i == h->nr_cmds)
415 return NULL;
416 } while (test_and_set_bit
417 (i & (BITS_PER_LONG - 1),
418 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
419#ifdef CCISS_DEBUG
420 printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
421#endif
422 c = h->cmd_pool + i;
423 memset(c, 0, sizeof(CommandList_struct));
424 cmd_dma_handle = h->cmd_pool_dhandle
425 + i * sizeof(CommandList_struct);
426 c->err_info = h->errinfo_pool + i;
427 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
428 err_dma_handle = h->errinfo_pool_dhandle
429 + i * sizeof(ErrorInfo_struct);
430 h->nr_allocs++;
431
432 c->cmdindex = i;
433 }
434
435 c->busaddr = (__u32) cmd_dma_handle;
436 temp64.val = (__u64) err_dma_handle;
437 c->ErrDesc.Addr.lower = temp64.val32.lower;
438 c->ErrDesc.Addr.upper = temp64.val32.upper;
439 c->ErrDesc.Len = sizeof(ErrorInfo_struct);
440
441 c->ctlr = h->ctlr;
442 return c;
443}
444
445/*
446 * Frees a command block that was previously allocated with cmd_alloc().
447 */
448static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
449{
450 int i;
451 u64bit temp64;
452
453 if (!got_from_pool) {
454 temp64.val32.lower = c->ErrDesc.Addr.lower;
455 temp64.val32.upper = c->ErrDesc.Addr.upper;
456 pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
457 c->err_info, (dma_addr_t) temp64.val);
458 pci_free_consistent(h->pdev, sizeof(CommandList_struct),
459 c, (dma_addr_t) c->busaddr);
460 } else {
461 i = c - h->cmd_pool;
462 clear_bit(i & (BITS_PER_LONG - 1),
463 h->cmd_pool_bits + (i / BITS_PER_LONG));
464 h->nr_frees++;
465 }
466}
467
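/*
 * Illustrative sketch (not part of the driver): cmd_alloc() and cmd_free()
 * must agree on where the block came from.  get_from_pool != 0 takes a
 * preallocated block from the controller's pool (no sleeping); get_from_pool
 * of 0 allocates fresh DMA-consistent memory and may sleep.  A hypothetical
 * caller using the pool:
 */
#if 0 /* example only */
static int example_cmd_lifecycle(ctlr_info_t *h)
{
        CommandList_struct *c = cmd_alloc(h, 1);        /* from the pool */

        if (c == NULL)
                return -EBUSY;
        /* ... fill in c->Request and submit it ... */
        cmd_free(h, c, 1);      /* must match: got_from_pool == 1 */
        return 0;
}
#endif
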
468static inline ctlr_info_t *get_host(struct gendisk *disk)
469{
470 return disk->queue->queuedata;
471}
472
473static inline drive_info_struct *get_drv(struct gendisk *disk)
474{
475 return disk->private_data;
476}
477
478/*
479 * Open. Make sure the device is really there.
480 */
481static int cciss_open(struct inode *inode, struct file *filep)
482{
483 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
484 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
485
486#ifdef CCISS_DEBUG
487 printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
488#endif /* CCISS_DEBUG */
489
490 if (host->busy_initializing || drv->busy_configuring)
491 return -EBUSY;
492 /*
493 * Root is allowed to open raw volume zero even if it's not configured
494 * so array config can still work. Root is also allowed to open any
495 * volume that has a LUN ID, so it can issue IOCTL to reread the
496 * disk information. I don't think I really like this
497 * but I'm already using way too many device nodes to claim another one
498 * for "raw controller".
499 */
500 if (drv->heads == 0) {
501 if (iminor(inode) != 0) { /* not node 0? */
502 /* if not node 0 make sure it is a partition = 0 */
503 if (iminor(inode) & 0x0f) {
504 return -ENXIO;
505 /* if it is, make sure we have a LUN ID */
506 } else if (drv->LunID == 0) {
507 return -ENXIO;
508 }
509 }
510 if (!capable(CAP_SYS_ADMIN))
511 return -EPERM;
512 }
513 drv->usage_count++;
514 host->usage_count++;
515 return 0;
516}
517
518/*
519 * Close. Sync first.
520 */
521static int cciss_release(struct inode *inode, struct file *filep)
522{
523 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
524 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
525
526#ifdef CCISS_DEBUG
527 printk(KERN_DEBUG "cciss_release %s\n",
528 inode->i_bdev->bd_disk->disk_name);
529#endif /* CCISS_DEBUG */
530
531 drv->usage_count--;
532 host->usage_count--;
533 return 0;
534}
535
536#ifdef CONFIG_COMPAT
537
538static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
539{
540 int ret;
541 lock_kernel();
542 ret = cciss_ioctl(f->f_path.dentry->d_inode, f, cmd, arg);
543 unlock_kernel();
544 return ret;
545}
546
547static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
548 unsigned long arg);
549static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd,
550 unsigned long arg);
551
552static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
553{
554 switch (cmd) {
555 case CCISS_GETPCIINFO:
556 case CCISS_GETINTINFO:
557 case CCISS_SETINTINFO:
558 case CCISS_GETNODENAME:
559 case CCISS_SETNODENAME:
560 case CCISS_GETHEARTBEAT:
561 case CCISS_GETBUSTYPES:
562 case CCISS_GETFIRMVER:
563 case CCISS_GETDRIVVER:
564 case CCISS_REVALIDVOLS:
565 case CCISS_DEREGDISK:
566 case CCISS_REGNEWDISK:
567 case CCISS_REGNEWD:
568 case CCISS_RESCANDISK:
569 case CCISS_GETLUNINFO:
570 return do_ioctl(f, cmd, arg);
571
572 case CCISS_PASSTHRU32:
573 return cciss_ioctl32_passthru(f, cmd, arg);
574 case CCISS_BIG_PASSTHRU32:
575 return cciss_ioctl32_big_passthru(f, cmd, arg);
576
577 default:
578 return -ENOIOCTLCMD;
579 }
580}
581
582static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
583 unsigned long arg)
584{
585 IOCTL32_Command_struct __user *arg32 =
586 (IOCTL32_Command_struct __user *) arg;
587 IOCTL_Command_struct arg64;
588 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
589 int err;
590 u32 cp;
591
592 err = 0;
593 err |=
594 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
595 sizeof(arg64.LUN_info));
596 err |=
597 copy_from_user(&arg64.Request, &arg32->Request,
598 sizeof(arg64.Request));
599 err |=
600 copy_from_user(&arg64.error_info, &arg32->error_info,
601 sizeof(arg64.error_info));
602 err |= get_user(arg64.buf_size, &arg32->buf_size);
603 err |= get_user(cp, &arg32->buf);
604 arg64.buf = compat_ptr(cp);
605 err |= copy_to_user(p, &arg64, sizeof(arg64));
606
607 if (err)
608 return -EFAULT;
609
610 err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long)p);
611 if (err)
612 return err;
613 err |=
614 copy_in_user(&arg32->error_info, &p->error_info,
615 sizeof(arg32->error_info));
616 if (err)
617 return -EFAULT;
618 return err;
619}
620
621static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd,
622 unsigned long arg)
623{
624 BIG_IOCTL32_Command_struct __user *arg32 =
625 (BIG_IOCTL32_Command_struct __user *) arg;
626 BIG_IOCTL_Command_struct arg64;
627 BIG_IOCTL_Command_struct __user *p =
628 compat_alloc_user_space(sizeof(arg64));
629 int err;
630 u32 cp;
631
632 err = 0;
633 err |=
634 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
635 sizeof(arg64.LUN_info));
636 err |=
637 copy_from_user(&arg64.Request, &arg32->Request,
638 sizeof(arg64.Request));
639 err |=
640 copy_from_user(&arg64.error_info, &arg32->error_info,
641 sizeof(arg64.error_info));
642 err |= get_user(arg64.buf_size, &arg32->buf_size);
643 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
644 err |= get_user(cp, &arg32->buf);
645 arg64.buf = compat_ptr(cp);
646 err |= copy_to_user(p, &arg64, sizeof(arg64));
647
648 if (err)
649 return -EFAULT;
650
651 err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long)p);
652 if (err)
653 return err;
654 err |=
655 copy_in_user(&arg32->error_info, &p->error_info,
656 sizeof(arg32->error_info));
657 if (err)
658 return -EFAULT;
659 return err;
660}
661#endif
662
663static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
664{
665 drive_info_struct *drv = get_drv(bdev->bd_disk);
666
667 if (!drv->cylinders)
668 return -ENXIO;
669
670 geo->heads = drv->heads;
671 geo->sectors = drv->sectors;
672 geo->cylinders = drv->cylinders;
673 return 0;
674}
675
676/*
677 * ioctl
678 */
679static int cciss_ioctl(struct inode *inode, struct file *filep,
680 unsigned int cmd, unsigned long arg)
681{
682 struct block_device *bdev = inode->i_bdev;
683 struct gendisk *disk = bdev->bd_disk;
684 ctlr_info_t *host = get_host(disk);
685 drive_info_struct *drv = get_drv(disk);
686 int ctlr = host->ctlr;
687 void __user *argp = (void __user *)arg;
688
689#ifdef CCISS_DEBUG
690 printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
691#endif /* CCISS_DEBUG */
692
693 switch (cmd) {
694 case CCISS_GETPCIINFO:
695 {
696 cciss_pci_info_struct pciinfo;
697
698 if (!arg)
699 return -EINVAL;
700 pciinfo.domain = pci_domain_nr(host->pdev->bus);
701 pciinfo.bus = host->pdev->bus->number;
702 pciinfo.dev_fn = host->pdev->devfn;
703 pciinfo.board_id = host->board_id;
704 if (copy_to_user
705 (argp, &pciinfo, sizeof(cciss_pci_info_struct)))
706 return -EFAULT;
707 return 0;
708 }
709 case CCISS_GETINTINFO:
710 {
711 cciss_coalint_struct intinfo;
712 if (!arg)
713 return -EINVAL;
714 intinfo.delay =
715 readl(&host->cfgtable->HostWrite.CoalIntDelay);
716 intinfo.count =
717 readl(&host->cfgtable->HostWrite.CoalIntCount);
718 if (copy_to_user
719 (argp, &intinfo, sizeof(cciss_coalint_struct)))
720 return -EFAULT;
721 return 0;
722 }
723 case CCISS_SETINTINFO:
724 {
725 cciss_coalint_struct intinfo;
726 unsigned long flags;
727 int i;
728
729 if (!arg)
730 return -EINVAL;
731 if (!capable(CAP_SYS_ADMIN))
732 return -EPERM;
733 if (copy_from_user
734 (&intinfo, argp, sizeof(cciss_coalint_struct)))
735 return -EFAULT;
736 if ((intinfo.delay == 0) && (intinfo.count == 0))
737 {
738// printk("cciss_ioctl: delay and count cannot be 0\n");
739 return -EINVAL;
740 }
741 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
742 /* Update the field, and then ring the doorbell */
743 writel(intinfo.delay,
744 &(host->cfgtable->HostWrite.CoalIntDelay));
745 writel(intinfo.count,
746 &(host->cfgtable->HostWrite.CoalIntCount));
747 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
748
749 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
750 if (!(readl(host->vaddr + SA5_DOORBELL)
751 & CFGTBL_ChangeReq))
752 break;
753 /* delay and try again */
754 udelay(1000);
755 }
756 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
757 if (i >= MAX_IOCTL_CONFIG_WAIT)
758 return -EAGAIN;
759 return 0;
760 }
761 case CCISS_GETNODENAME:
762 {
763 NodeName_type NodeName;
764 int i;
765
766 if (!arg)
767 return -EINVAL;
768 for (i = 0; i < 16; i++)
769 NodeName[i] =
770 readb(&host->cfgtable->ServerName[i]);
771 if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
772 return -EFAULT;
773 return 0;
774 }
775 case CCISS_SETNODENAME:
776 {
777 NodeName_type NodeName;
778 unsigned long flags;
779 int i;
780
781 if (!arg)
782 return -EINVAL;
783 if (!capable(CAP_SYS_ADMIN))
784 return -EPERM;
785
786 if (copy_from_user
787 (NodeName, argp, sizeof(NodeName_type)))
788 return -EFAULT;
789
790 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
791
792 /* Update the field, and then ring the doorbell */
793 for (i = 0; i < 16; i++)
794 writeb(NodeName[i],
795 &host->cfgtable->ServerName[i]);
796
797 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
798
799 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
800 if (!(readl(host->vaddr + SA5_DOORBELL)
801 & CFGTBL_ChangeReq))
802 break;
803 /* delay and try again */
804 udelay(1000);
805 }
806 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
807 if (i >= MAX_IOCTL_CONFIG_WAIT)
808 return -EAGAIN;
809 return 0;
810 }
811
812 case CCISS_GETHEARTBEAT:
813 {
814 Heartbeat_type heartbeat;
815
816 if (!arg)
817 return -EINVAL;
818 heartbeat = readl(&host->cfgtable->HeartBeat);
819 if (copy_to_user
820 (argp, &heartbeat, sizeof(Heartbeat_type)))
821 return -EFAULT;
822 return 0;
823 }
824 case CCISS_GETBUSTYPES:
825 {
826 BusTypes_type BusTypes;
827
828 if (!arg)
829 return -EINVAL;
830 BusTypes = readl(&host->cfgtable->BusTypes);
831 if (copy_to_user
832 (argp, &BusTypes, sizeof(BusTypes_type)))
833 return -EFAULT;
834 return 0;
835 }
836 case CCISS_GETFIRMVER:
837 {
838 FirmwareVer_type firmware;
839
840 if (!arg)
841 return -EINVAL;
842 memcpy(firmware, host->firm_ver, 4);
843
844 if (copy_to_user
845 (argp, firmware, sizeof(FirmwareVer_type)))
846 return -EFAULT;
847 return 0;
848 }
849 case CCISS_GETDRIVVER:
850 {
851 DriverVer_type DriverVer = DRIVER_VERSION;
852
853 if (!arg)
854 return -EINVAL;
855
856 if (copy_to_user
857 (argp, &DriverVer, sizeof(DriverVer_type)))
858 return -EFAULT;
859 return 0;
860 }
861
862 case CCISS_REVALIDVOLS:
863 return rebuild_lun_table(host, NULL);
864
865 case CCISS_GETLUNINFO:{
866 LogvolInfo_struct luninfo;
867
868 luninfo.LunID = drv->LunID;
869 luninfo.num_opens = drv->usage_count;
870 luninfo.num_parts = 0;
871 if (copy_to_user(argp, &luninfo,
872 sizeof(LogvolInfo_struct)))
873 return -EFAULT;
874 return 0;
875 }
876 case CCISS_DEREGDISK:
877 return rebuild_lun_table(host, disk);
878
879 case CCISS_REGNEWD:
880 return rebuild_lun_table(host, NULL);
881
882 case CCISS_PASSTHRU:
883 {
884 IOCTL_Command_struct iocommand;
885 CommandList_struct *c;
886 char *buff = NULL;
887 u64bit temp64;
888 unsigned long flags;
889 DECLARE_COMPLETION_ONSTACK(wait);
890
891 if (!arg)
892 return -EINVAL;
893
894 if (!capable(CAP_SYS_RAWIO))
895 return -EPERM;
896
897 if (copy_from_user
898 (&iocommand, argp, sizeof(IOCTL_Command_struct)))
899 return -EFAULT;
900 if ((iocommand.buf_size < 1) &&
901 (iocommand.Request.Type.Direction != XFER_NONE)) {
902 return -EINVAL;
903 }
904#if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
905 /* Check kmalloc limits */
906 if (iocommand.buf_size > 128000)
907 return -EINVAL;
908#endif
909 if (iocommand.buf_size > 0) {
910 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
911 if (buff == NULL)
912 return -EFAULT;
913 }
914 if (iocommand.Request.Type.Direction == XFER_WRITE) {
915 /* Copy the data into the buffer we created */
916 if (copy_from_user
917 (buff, iocommand.buf, iocommand.buf_size)) {
918 kfree(buff);
919 return -EFAULT;
920 }
921 } else {
922 memset(buff, 0, iocommand.buf_size);
923 }
924 if ((c = cmd_alloc(host, 0)) == NULL) {
925 kfree(buff);
926 return -ENOMEM;
927 }
928 // Fill in the command type
929 c->cmd_type = CMD_IOCTL_PEND;
930 // Fill in Command Header
931 c->Header.ReplyQueue = 0; // unused in simple mode
932 if (iocommand.buf_size > 0) // buffer to fill
933 {
934 c->Header.SGList = 1;
935 c->Header.SGTotal = 1;
936 } else // no buffers to fill
937 {
938 c->Header.SGList = 0;
939 c->Header.SGTotal = 0;
940 }
941 c->Header.LUN = iocommand.LUN_info;
942 c->Header.Tag.lower = c->busaddr; // use the kernel address of the cmd block for tag
943
944 // Fill in Request block
945 c->Request = iocommand.Request;
946
947 // Fill in the scatter gather information
948 if (iocommand.buf_size > 0) {
949 temp64.val = pci_map_single(host->pdev, buff,
950 iocommand.buf_size,
951 PCI_DMA_BIDIRECTIONAL);
952 c->SG[0].Addr.lower = temp64.val32.lower;
953 c->SG[0].Addr.upper = temp64.val32.upper;
954 c->SG[0].Len = iocommand.buf_size;
955 c->SG[0].Ext = 0; // we are not chaining
956 }
957 c->waiting = &wait;
958
959 /* Put the request on the tail of the request queue */
960 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
961 addQ(&host->reqQ, c);
962 host->Qdepth++;
963 start_io(host);
964 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
965
966 wait_for_completion(&wait);
967
968 /* unlock the buffers from DMA */
969 temp64.val32.lower = c->SG[0].Addr.lower;
970 temp64.val32.upper = c->SG[0].Addr.upper;
971 pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
972 iocommand.buf_size,
973 PCI_DMA_BIDIRECTIONAL);
974
975 /* Copy the error information out */
976 iocommand.error_info = *(c->err_info);
977 if (copy_to_user
978 (argp, &iocommand, sizeof(IOCTL_Command_struct))) {
979 kfree(buff);
980 cmd_free(host, c, 0);
981 return -EFAULT;
982 }
983
984 if (iocommand.Request.Type.Direction == XFER_READ) {
985 /* Copy the data out of the buffer we created */
986 if (copy_to_user
987 (iocommand.buf, buff, iocommand.buf_size)) {
988 kfree(buff);
989 cmd_free(host, c, 0);
990 return -EFAULT;
991 }
992 }
993 kfree(buff);
994 cmd_free(host, c, 0);
995 return 0;
996 }
997 case CCISS_BIG_PASSTHRU:{
998 BIG_IOCTL_Command_struct *ioc;
999 CommandList_struct *c;
1000 unsigned char **buff = NULL;
1001 int *buff_size = NULL;
1002 u64bit temp64;
1003 unsigned long flags;
1004 BYTE sg_used = 0;
1005 int status = 0;
1006 int i;
1007 DECLARE_COMPLETION_ONSTACK(wait);
1008 __u32 left;
1009 __u32 sz;
1010 BYTE __user *data_ptr;
1011
1012 if (!arg)
1013 return -EINVAL;
1014 if (!capable(CAP_SYS_RAWIO))
1015 return -EPERM;
1016 ioc = (BIG_IOCTL_Command_struct *)
1017 kmalloc(sizeof(*ioc), GFP_KERNEL);
1018 if (!ioc) {
1019 status = -ENOMEM;
1020 goto cleanup1;
1021 }
1022 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
1023 status = -EFAULT;
1024 goto cleanup1;
1025 }
1026 if ((ioc->buf_size < 1) &&
1027 (ioc->Request.Type.Direction != XFER_NONE)) {
1028 status = -EINVAL;
1029 goto cleanup1;
1030 }
1031 /* Check kmalloc limits using all SGs */
1032 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
1033 status = -EINVAL;
1034 goto cleanup1;
1035 }
1036 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
1037 status = -EINVAL;
1038 goto cleanup1;
1039 }
1040 buff =
1041 kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
1042 if (!buff) {
1043 status = -ENOMEM;
1044 goto cleanup1;
1045 }
1046 buff_size = kmalloc(MAXSGENTRIES * sizeof(int),
1047 GFP_KERNEL);
1048 if (!buff_size) {
1049 status = -ENOMEM;
1050 goto cleanup1;
1051 }
1052 left = ioc->buf_size;
1053 data_ptr = ioc->buf;
1054 while (left) {
1055 sz = (left >
1056 ioc->malloc_size) ? ioc->
1057 malloc_size : left;
1058 buff_size[sg_used] = sz;
1059 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
1060 if (buff[sg_used] == NULL) {
1061 status = -ENOMEM;
1062 goto cleanup1;
1063 }
1064 if (ioc->Request.Type.Direction == XFER_WRITE) {
1065 if (copy_from_user
1066 (buff[sg_used], data_ptr, sz)) {
1067 status = -ENOMEM;
1068 goto cleanup1;
1069 }
1070 } else {
1071 memset(buff[sg_used], 0, sz);
1072 }
1073 left -= sz;
1074 data_ptr += sz;
1075 sg_used++;
1076 }
1077 if ((c = cmd_alloc(host, 0)) == NULL) {
1078 status = -ENOMEM;
1079 goto cleanup1;
1080 }
1081 c->cmd_type = CMD_IOCTL_PEND;
1082 c->Header.ReplyQueue = 0;
1083
1084 if (ioc->buf_size > 0) {
1085 c->Header.SGList = sg_used;
1086 c->Header.SGTotal = sg_used;
1087 } else {
1088 c->Header.SGList = 0;
1089 c->Header.SGTotal = 0;
1090 }
1091 c->Header.LUN = ioc->LUN_info;
1092 c->Header.Tag.lower = c->busaddr;
1093
1094 c->Request = ioc->Request;
1095 if (ioc->buf_size > 0) {
1096 int i;
1097 for (i = 0; i < sg_used; i++) {
1098 temp64.val =
1099 pci_map_single(host->pdev, buff[i],
1100 buff_size[i],
1101 PCI_DMA_BIDIRECTIONAL);
1102 c->SG[i].Addr.lower =
1103 temp64.val32.lower;
1104 c->SG[i].Addr.upper =
1105 temp64.val32.upper;
1106 c->SG[i].Len = buff_size[i];
1107 c->SG[i].Ext = 0; /* we are not chaining */
1108 }
1109 }
1110 c->waiting = &wait;
1111 /* Put the request on the tail of the request queue */
1112 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1113 addQ(&host->reqQ, c);
1114 host->Qdepth++;
1115 start_io(host);
1116 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1117 wait_for_completion(&wait);
1118 /* unlock the buffers from DMA */
1119 for (i = 0; i < sg_used; i++) {
1120 temp64.val32.lower = c->SG[i].Addr.lower;
1121 temp64.val32.upper = c->SG[i].Addr.upper;
1122 pci_unmap_single(host->pdev,
1123 (dma_addr_t) temp64.val, buff_size[i],
1124 PCI_DMA_BIDIRECTIONAL);
1125 }
1126 /* Copy the error information out */
1127 ioc->error_info = *(c->err_info);
1128 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
1129 cmd_free(host, c, 0);
1130 status = -EFAULT;
1131 goto cleanup1;
1132 }
1133 if (ioc->Request.Type.Direction == XFER_READ) {
1134 /* Copy the data out of the buffer we created */
1135 BYTE __user *ptr = ioc->buf;
1136 for (i = 0; i < sg_used; i++) {
1137 if (copy_to_user
1138 (ptr, buff[i], buff_size[i])) {
1139 cmd_free(host, c, 0);
1140 status = -EFAULT;
1141 goto cleanup1;
1142 }
1143 ptr += buff_size[i];
1144 }
1145 }
1146 cmd_free(host, c, 0);
1147 status = 0;
1148 cleanup1:
1149 if (buff) {
1150 for (i = 0; i < sg_used; i++)
1151 kfree(buff[i]);
1152 kfree(buff);
1153 }
1154 kfree(buff_size);
1155 kfree(ioc);
1156 return status;
1157 }
1158
1159 /* scsi_cmd_ioctl handles these, below, though some are not */
1160 /* very meaningful for cciss. SG_IO is the main one people want. */
1161
1162 case SG_GET_VERSION_NUM:
1163 case SG_SET_TIMEOUT:
1164 case SG_GET_TIMEOUT:
1165 case SG_GET_RESERVED_SIZE:
1166 case SG_SET_RESERVED_SIZE:
1167 case SG_EMULATED_HOST:
1168 case SG_IO:
1169 case SCSI_IOCTL_SEND_COMMAND:
1170 return scsi_cmd_ioctl(filep, disk, cmd, argp);
1171
1172 /* scsi_cmd_ioctl would normally handle these, below, but */
1173 /* they aren't a good fit for cciss, as CD-ROMs are */
1174 /* not supported, and we don't have any bus/target/lun */
1175 /* which we present to the kernel. */
1176
1177 case CDROM_SEND_PACKET:
1178 case CDROMCLOSETRAY:
1179 case CDROMEJECT:
1180 case SCSI_IOCTL_GET_IDLUN:
1181 case SCSI_IOCTL_GET_BUS_NUMBER:
1182 default:
1183 return -ENOTTY;
1184 }
1185}
1186
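/*
 * Illustrative sketch (not part of this file): because the SG_IO case above
 * is handed to scsi_cmd_ioctl(), userspace can send SCSI CDBs to a logical
 * volume node.  The device path and helper name below are only examples; a
 * minimal INQUIRY through a cciss node might look like this:
 */
#if 0 /* userspace example only */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

static int example_sg_io_inquiry(const char *dev)       /* e.g. "/dev/cciss/c0d0" */
{
        unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };        /* SCSI INQUIRY */
        unsigned char buf[96], sense[32];
        struct sg_io_hdr hdr;
        int fd, rc;

        fd = open(dev, O_RDWR);
        if (fd < 0)
                return -1;
        memset(&hdr, 0, sizeof(hdr));
        hdr.interface_id = 'S';
        hdr.cmd_len = sizeof(cdb);
        hdr.cmdp = cdb;
        hdr.dxfer_direction = SG_DXFER_FROM_DEV;
        hdr.dxferp = buf;
        hdr.dxfer_len = sizeof(buf);
        hdr.sbp = sense;
        hdr.mx_sb_len = sizeof(sense);
        hdr.timeout = 5000;             /* milliseconds */
        rc = ioctl(fd, SG_IO, &hdr);    /* lands in the SG_IO case above */
        close(fd);
        return rc;
}
#endif
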
1187static inline void complete_buffers(struct bio *bio, int status)
1188{
1189 while (bio) {
1190 struct bio *xbh = bio->bi_next;
1191 int nr_sectors = bio_sectors(bio);
1192
1193 bio->bi_next = NULL;
1194 bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO);
1195 bio = xbh;
1196 }
1197}
1198
1199static void cciss_check_queues(ctlr_info_t *h)
1200{
1201 int start_queue = h->next_to_run;
1202 int i;
1203
1204 /* check to see if we have maxed out the number of commands that can
1205 * be placed on the queue. If so then exit. We do this check here
1206 * in case the interrupt we serviced was from an ioctl and did not
1207 * free any new commands.
1208 */
1209 if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds)
1210 return;
1211
1212 /* We have room on the queue for more commands. Now we need to queue
1213 * them up. We will also keep track of the next queue to run so
1214 * that every queue gets a chance to be started first.
1215 */
1216 for (i = 0; i < h->highest_lun + 1; i++) {
1217 int curr_queue = (start_queue + i) % (h->highest_lun + 1);
1218 /* make sure the disk has been added and the drive is real
1219 * because this can be called from the middle of init_one.
1220 */
1221 if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
1222 continue;
1223 blk_start_queue(h->gendisk[curr_queue]->queue);
1224
1225 /* check to see if we have maxed out the number of commands
1226 * that can be placed on the queue.
1227 */
1228 if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) {
1229 if (curr_queue == start_queue) {
1230 h->next_to_run =
1231 (start_queue + 1) % (h->highest_lun + 1);
1232 break;
1233 } else {
1234 h->next_to_run = curr_queue;
1235 break;
1236 }
1237 } else {
1238 curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
1239 }
1240 }
1241}
1242
1243static void cciss_softirq_done(struct request *rq)
1244{
1245 CommandList_struct *cmd = rq->completion_data;
1246 ctlr_info_t *h = hba[cmd->ctlr];
1247 unsigned long flags;
1248 u64bit temp64;
1249 int i, ddir;
1250
1251 if (cmd->Request.Type.Direction == XFER_READ)
1252 ddir = PCI_DMA_FROMDEVICE;
1253 else
1254 ddir = PCI_DMA_TODEVICE;
1255
1256 /* command did not need to be retried */
1257 /* unmap the DMA mapping for all the scatter gather elements */
1258 for (i = 0; i < cmd->Header.SGList; i++) {
1259 temp64.val32.lower = cmd->SG[i].Addr.lower;
1260 temp64.val32.upper = cmd->SG[i].Addr.upper;
1261 pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
1262 }
1263
1264 complete_buffers(rq->bio, rq->errors);
1265
1266 if (blk_fs_request(rq)) {
1267 const int rw = rq_data_dir(rq);
1268
1269 disk_stat_add(rq->rq_disk, sectors[rw], rq->nr_sectors);
1270 }
1271
1272#ifdef CCISS_DEBUG
1273 printk("Done with %p\n", rq);
1274#endif /* CCISS_DEBUG */
1275
1276 add_disk_randomness(rq->rq_disk);
1277 spin_lock_irqsave(&h->lock, flags);
1278 end_that_request_last(rq, rq->errors);
1279 cmd_free(h, cmd, 1);
1280 cciss_check_queues(h);
1281 spin_unlock_irqrestore(&h->lock, flags);
1282}
1283
1284/* This function will check the usage_count of the drive to be updated/added.
1285 * If the usage_count is zero then the drive information will be updated and
1286 * the disk will be re-registered with the kernel. If not then it will be
1287 * left alone for the next reboot. The exception to this is disk 0 which
1288 * will always be left registered with the kernel since it is also the
1289 * controller node. Any changes to disk 0 will show up on the next
1290 * reboot.
1291 */
1292static void cciss_update_drive_info(int ctlr, int drv_index)
1293{
1294 ctlr_info_t *h = hba[ctlr];
1295 struct gendisk *disk;
1296 InquiryData_struct *inq_buff = NULL;
1297 unsigned int block_size;
1298 sector_t total_size;
1299 unsigned long flags = 0;
1300 int ret = 0;
1301
1302 /* if the disk already exists then deregister it before proceeding */
1303 if (h->drv[drv_index].raid_level != -1) {
1304 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1305 h->drv[drv_index].busy_configuring = 1;
1306 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1307 ret = deregister_disk(h->gendisk[drv_index],
1308 &h->drv[drv_index], 0);
1309 h->drv[drv_index].busy_configuring = 0;
1310 }
1311
1312 /* If the disk is in use return */
1313 if (ret)
1314 return;
1315
1316 /* Get information about the disk and modify the driver structure */
1317 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
1318 if (inq_buff == NULL)
1319 goto mem_msg;
1320
1321 /* testing to see if 16-byte CDBs are already being used */
1322 if (h->cciss_read == CCISS_READ_16) {
1323 cciss_read_capacity_16(h->ctlr, drv_index, 1,
1324 &total_size, &block_size);
1325 goto geo_inq;
1326 }
1327
1328 cciss_read_capacity(ctlr, drv_index, 1,
1329 &total_size, &block_size);
1330
1331 /* if read_capacity returns all F's this volume is >2TB in size */
1332 /* so we switch to 16-byte CDB's for all read/write ops */
1333 if (total_size == 0xFFFFFFFFULL) {
1334 cciss_read_capacity_16(ctlr, drv_index, 1,
1335 &total_size, &block_size);
1336 h->cciss_read = CCISS_READ_16;
1337 h->cciss_write = CCISS_WRITE_16;
1338 } else {
1339 h->cciss_read = CCISS_READ_10;
1340 h->cciss_write = CCISS_WRITE_10;
1341 }
1342geo_inq:
1343 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
1344 inq_buff, &h->drv[drv_index]);
1345
1346 ++h->num_luns;
1347 disk = h->gendisk[drv_index];
1348 set_capacity(disk, h->drv[drv_index].nr_blocks);
1349
1350 /* if it's the controller it's already added */
1351 if (drv_index) {
1352 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1353 sprintf(disk->disk_name, "cciss/c%dd%d", ctlr, drv_index);
1354 disk->major = h->major;
1355 disk->first_minor = drv_index << NWD_SHIFT;
1356 disk->fops = &cciss_fops;
1357 disk->private_data = &h->drv[drv_index];
1358
1359 /* Set up queue information */
1360 disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
1361 blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
1362
1363 /* This is a hardware imposed limit. */
1364 blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
1365
1366 /* This is a limit in the driver and could be eliminated. */
1367 blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
1368
1369 blk_queue_max_sectors(disk->queue, h->cciss_max_sectors);
1370
1371 blk_queue_softirq_done(disk->queue, cciss_softirq_done);
1372
1373 disk->queue->queuedata = hba[ctlr];
1374
1375 blk_queue_hardsect_size(disk->queue,
1376 hba[ctlr]->drv[drv_index].block_size);
1377
1378 h->drv[drv_index].queue = disk->queue;
1379 add_disk(disk);
1380 }
1381
1382 freeret:
1383 kfree(inq_buff);
1384 return;
1385 mem_msg:
1386 printk(KERN_ERR "cciss: out of memory\n");
1387 goto freeret;
1388}
1389
1390/* This function will find the first index of the controller's drive array
1391 * that has a -1 for the raid_level and will return that index. This is
1392 * where new drives will be added. If the index to be returned is greater
1393 * than the highest_lun index for the controller then highest_lun is set
1394 * to this new index. If there are no available indexes then -1 is returned.
1395 */
1396static int cciss_find_free_drive_index(int ctlr)
1397{
1398 int i;
1399
1400 for (i = 0; i < CISS_MAX_LUN; i++) {
1401 if (hba[ctlr]->drv[i].raid_level == -1) {
1402 if (i > hba[ctlr]->highest_lun)
1403 hba[ctlr]->highest_lun = i;
1404 return i;
1405 }
1406 }
1407 return -1;
1408}
1409
1410/* This function will add and remove logical drives from the Logical
1411 * drive array of the controller and maintain persistence of ordering
1412 * so that mount points are preserved until the next reboot. This allows
1413 * for the removal of logical drives in the middle of the drive array
1414 * without a re-ordering of those drives.
1415 * INPUT
1416 * h = The controller to perform the operations on
1417 * del_disk = The disk to remove if specified. If the value given
1418 * is NULL then no disk is removed.
1419 */
1420static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1421{
1422 int ctlr = h->ctlr;
1423 int num_luns;
1424 ReportLunData_struct *ld_buff = NULL;
1425 drive_info_struct *drv = NULL;
1426 int return_code;
1427 int listlength = 0;
1428 int i;
1429 int drv_found;
1430 int drv_index = 0;
1431 __u32 lunid = 0;
1432 unsigned long flags;
1433
1434 /* Set busy_configuring flag for this operation */
1435 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1436 if (h->busy_configuring) {
1437 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1438 return -EBUSY;
1439 }
1440 h->busy_configuring = 1;
1441
1442 /* if del_disk is NULL then we are being called to add a new disk
1443 * and update the logical drive table. If it is not NULL then
1444 * we will check if the disk is in use or not.
1445 */
1446 if (del_disk != NULL) {
1447 drv = get_drv(del_disk);
1448 drv->busy_configuring = 1;
1449 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1450 return_code = deregister_disk(del_disk, drv, 1);
1451 drv->busy_configuring = 0;
1452 h->busy_configuring = 0;
1453 return return_code;
1454 } else {
1455 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1456 if (!capable(CAP_SYS_RAWIO))
1457 return -EPERM;
1458
1459 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1460 if (ld_buff == NULL)
1461 goto mem_msg;
1462
1463 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1464 sizeof(ReportLunData_struct), 0,
1465 0, 0, TYPE_CMD);
1466
1467 if (return_code == IO_OK) {
1468 listlength =
1469 be32_to_cpu(*(__be32 *) ld_buff->LUNListLength);
1470 } else { /* reading number of logical volumes failed */
1471 printk(KERN_WARNING "cciss: report logical volume"
1472 " command failed\n");
1473 listlength = 0;
1474 goto freeret;
1475 }
1476
1477 num_luns = listlength / 8; /* 8 bytes per entry */
1478 if (num_luns > CISS_MAX_LUN) {
1479 num_luns = CISS_MAX_LUN;
1480 printk(KERN_WARNING "cciss: more luns configured"
1481 " on controller than can be handled by"
1482 " this driver.\n");
1483 }
1484
1485 /* Compare controller drive array to the driver's drive array.
1486 * Check for updates in the drive information and any new drives
1487 * on the controller.
1488 */
1489 for (i = 0; i < num_luns; i++) {
1490 int j;
1491
1492 drv_found = 0;
1493
1494 lunid = (0xff &
1495 (unsigned int)(ld_buff->LUN[i][3])) << 24;
1496 lunid |= (0xff &
1497 (unsigned int)(ld_buff->LUN[i][2])) << 16;
1498 lunid |= (0xff &
1499 (unsigned int)(ld_buff->LUN[i][1])) << 8;
1500 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
1501
1502 /* Find if the LUN is already in the drive array
1503 * of the controller. If so then update its info
1504 * if not in use. If it does not exist then find
1505 * the first free index and add it.
1506 */
1507 for (j = 0; j <= h->highest_lun; j++) {
1508 if (h->drv[j].LunID == lunid) {
1509 drv_index = j;
1510 drv_found = 1;
1511 }
1512 }
1513
1514 /* check if the drive was found already in the array */
1515 if (!drv_found) {
1516 drv_index = cciss_find_free_drive_index(ctlr);
1517 if (drv_index == -1)
1518 goto freeret;
1519
1520 /*Check if the gendisk needs to be allocated */
1521 if (!h->gendisk[drv_index]){
1522 h->gendisk[drv_index] = alloc_disk(1 << NWD_SHIFT);
1523 if (!h->gendisk[drv_index]){
1524 printk(KERN_ERR "cciss: could not allocate new disk %d\n", drv_index);
1525 goto mem_msg;
1526 }
1527 }
1528 }
1529 h->drv[drv_index].LunID = lunid;
1530 cciss_update_drive_info(ctlr, drv_index);
1531 } /* end for */
1532 } /* end else */
1533
1534 freeret:
1535 kfree(ld_buff);
1536 h->busy_configuring = 0;
1537 /* We return -1 here to tell the ACU that we have registered/updated
1538 * all of the drives that we can and to keep it from calling us
1539 * additional times.
1540 */
1541 return -1;
1542 mem_msg:
1543 printk(KERN_ERR "cciss: out of memory\n");
1544 goto freeret;
1545}
1546
1547/* This function will deregister the disk and its queue from the
1548 * kernel. It must be called with the controller lock held and the
1549 * drv structure's busy_configuring flag set. Its parameters are:
1550 *
1551 * disk = This is the disk to be deregistered
1552 * drv = This is the drive_info_struct associated with the disk to be
1553 * deregistered. It contains information about the disk used
1554 * by the driver.
1555 * clear_all = This flag determines whether or not the disk information
1556 * is going to be completely cleared out and the highest_lun
1557 * reset. Sometimes we want to clear out information about
1558 * the disk in preparation for re-adding it. In this case
1559 * the highest_lun should be left unchanged and the LunID
1560 * should not be cleared.
1561*/
1562static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1563 int clear_all)
1564{
1565 int i;
1566 ctlr_info_t *h = get_host(disk);
1567
1568 if (!capable(CAP_SYS_RAWIO))
1569 return -EPERM;
1570
1571 /* make sure logical volume is NOT in use */
1572 if (clear_all || (h->gendisk[0] == disk)) {
1573 if (drv->usage_count > 1)
1574 return -EBUSY;
1575 } else if (drv->usage_count > 0)
1576 return -EBUSY;
1577
1578 /* invalidate the devices and deregister the disk. If it is disk
1579 * zero do not deregister it but just zero out its values. This
1580 * allows us to delete disk zero but keep the controller registered.
1581 */
1582 if (h->gendisk[0] != disk) {
1583 if (disk) {
1584 request_queue_t *q = disk->queue;
1585 if (disk->flags & GENHD_FL_UP)
1586 del_gendisk(disk);
1587 if (q) {
1588 blk_cleanup_queue(q);
1589 /* Set drv->queue to NULL so that we do not try
1590 * to call blk_start_queue on this queue in the
1591 * interrupt handler
1592 */
1593 drv->queue = NULL;
1594 }
1595 /* If clear_all is set then we are deleting the logical
1596 * drive, not just refreshing its info. For drives
1597 * other than disk 0 we will call put_disk. We do not
1598 * do this for disk 0 as we need it to be able to
1599 * configure the controller.
1600 */
1601 if (clear_all){
1602 /* This isn't pretty, but we need to find the
1603 * disk in our array and NULL out the pointer.
1604 * This is so that we will call alloc_disk if
1605 * this index is used again later.
1606 */
1607 for (i=0; i < CISS_MAX_LUN; i++){
1608 if(h->gendisk[i] == disk){
1609 h->gendisk[i] = NULL;
1610 break;
1611 }
1612 }
1613 put_disk(disk);
1614 }
1615 }
1616 } else {
1617 set_capacity(disk, 0);
1618 }
1619
1620 --h->num_luns;
1621 /* zero out the disk size info */
1622 drv->nr_blocks = 0;
1623 drv->block_size = 0;
1624 drv->heads = 0;
1625 drv->sectors = 0;
1626 drv->cylinders = 0;
1627 drv->raid_level = -1; /* This can be used as a flag variable to
1628 * indicate that this element of the drive
1629 * array is free.
1630 */
1631
1632 if (clear_all) {
1633 /* check to see if it was the last disk */
1634 if (drv == h->drv + h->highest_lun) {
1635 /* if so, find the new highest lun */
1636 int i, newhighest = -1;
1637 for (i = 0; i < h->highest_lun; i++) {
1638 /* if the disk has size > 0, it is available */
1639 if (h->drv[i].heads)
1640 newhighest = i;
1641 }
1642 h->highest_lun = newhighest;
1643 }
1644
1645 drv->LunID = 0;
1646 }
1647 return 0;
1648}
1649
1650static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
1651 1: address logical volume log_unit,
1652 2: periph device address is scsi3addr */
1653 unsigned int log_unit, __u8 page_code,
1654 unsigned char *scsi3addr, int cmd_type)
1655{
1656 ctlr_info_t *h = hba[ctlr];
1657 u64bit buff_dma_handle;
1658 int status = IO_OK;
1659
1660 c->cmd_type = CMD_IOCTL_PEND;
1661 c->Header.ReplyQueue = 0;
1662 if (buff != NULL) {
1663 c->Header.SGList = 1;
1664 c->Header.SGTotal = 1;
1665 } else {
1666 c->Header.SGList = 0;
1667 c->Header.SGTotal = 0;
1668 }
1669 c->Header.Tag.lower = c->busaddr;
1670
1671 c->Request.Type.Type = cmd_type;
1672 if (cmd_type == TYPE_CMD) {
1673 switch (cmd) {
1674 case CISS_INQUIRY:
1675 /* If the logical unit number is 0 then this is going
1676 to the controller, so it's a physical command:
1677 mode = 0, target = 0, and we have nothing to write.
1678 Otherwise, if use_unit_num == 1:
1679 mode = 1 (volume set addressing), target = LUNID.
1680 Otherwise, if use_unit_num == 2:
1681 mode = 0 (periph dev addr), target = scsi3addr */
1682 if (use_unit_num == 1) {
1683 c->Header.LUN.LogDev.VolId =
1684 h->drv[log_unit].LunID;
1685 c->Header.LUN.LogDev.Mode = 1;
1686 } else if (use_unit_num == 2) {
1687 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr,
1688 8);
1689 c->Header.LUN.LogDev.Mode = 0;
1690 }
1691 /* are we trying to read a vital product page */
1692 if (page_code != 0) {
1693 c->Request.CDB[1] = 0x01;
1694 c->Request.CDB[2] = page_code;
1695 }
1696 c->Request.CDBLen = 6;
1697 c->Request.Type.Attribute = ATTR_SIMPLE;
1698 c->Request.Type.Direction = XFER_READ;
1699 c->Request.Timeout = 0;
1700 c->Request.CDB[0] = CISS_INQUIRY;
1701 c->Request.CDB[4] = size & 0xFF;
1702 break;
1703 case CISS_REPORT_LOG:
1704 case CISS_REPORT_PHYS:
1705 /* Talking to the controller, so it's a physical command:
1706 mode = 00, target = 0. Nothing to write.
1707 */
1708 c->Request.CDBLen = 12;
1709 c->Request.Type.Attribute = ATTR_SIMPLE;
1710 c->Request.Type.Direction = XFER_READ;
1711 c->Request.Timeout = 0;
1712 c->Request.CDB[0] = cmd;
1713 c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
1714 c->Request.CDB[7] = (size >> 16) & 0xFF;
1715 c->Request.CDB[8] = (size >> 8) & 0xFF;
1716 c->Request.CDB[9] = size & 0xFF;
1717 break;
1718
1719 case CCISS_READ_CAPACITY:
1720 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1721 c->Header.LUN.LogDev.Mode = 1;
1722 c->Request.CDBLen = 10;
1723 c->Request.Type.Attribute = ATTR_SIMPLE;
1724 c->Request.Type.Direction = XFER_READ;
1725 c->Request.Timeout = 0;
1726 c->Request.CDB[0] = cmd;
1727 break;
1728 case CCISS_READ_CAPACITY_16:
1729 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1730 c->Header.LUN.LogDev.Mode = 1;
1731 c->Request.CDBLen = 16;
1732 c->Request.Type.Attribute = ATTR_SIMPLE;
1733 c->Request.Type.Direction = XFER_READ;
1734 c->Request.Timeout = 0;
1735 c->Request.CDB[0] = cmd;
1736 c->Request.CDB[1] = 0x10;
1737 c->Request.CDB[10] = (size >> 24) & 0xFF;
1738 c->Request.CDB[11] = (size >> 16) & 0xFF;
1739 c->Request.CDB[12] = (size >> 8) & 0xFF;
1740 c->Request.CDB[13] = size & 0xFF;
1741 c->Request.Timeout = 0;
1742 c->Request.CDB[0] = cmd;
1743 break;
1744 case CCISS_CACHE_FLUSH:
1745 c->Request.CDBLen = 12;
1746 c->Request.Type.Attribute = ATTR_SIMPLE;
1747 c->Request.Type.Direction = XFER_WRITE;
1748 c->Request.Timeout = 0;
1749 c->Request.CDB[0] = BMIC_WRITE;
1750 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
1751 break;
1752 default:
1753 printk(KERN_WARNING
1754 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
1755 return IO_ERROR;
1756 }
1757 } else if (cmd_type == TYPE_MSG) {
1758 switch (cmd) {
1759 case 0: /* ABORT message */
1760 c->Request.CDBLen = 12;
1761 c->Request.Type.Attribute = ATTR_SIMPLE;
1762 c->Request.Type.Direction = XFER_WRITE;
1763 c->Request.Timeout = 0;
1764 c->Request.CDB[0] = cmd; /* abort */
1765 c->Request.CDB[1] = 0; /* abort a command */
1766 /* buff contains the tag of the command to abort */
1767 memcpy(&c->Request.CDB[4], buff, 8);
1768 break;
1769 case 1: /* RESET message */
1770 c->Request.CDBLen = 12;
1771 c->Request.Type.Attribute = ATTR_SIMPLE;
1772 c->Request.Type.Direction = XFER_WRITE;
1773 c->Request.Timeout = 0;
1774 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
1775 c->Request.CDB[0] = cmd; /* reset */
1776 c->Request.CDB[1] = 0x04; /* reset a LUN */
1777 break;
1778 case 3: /* No-Op message */
1779 c->Request.CDBLen = 1;
1780 c->Request.Type.Attribute = ATTR_SIMPLE;
1781 c->Request.Type.Direction = XFER_WRITE;
1782 c->Request.Timeout = 0;
1783 c->Request.CDB[0] = cmd;
1784 break;
1785 default:
1786 printk(KERN_WARNING
1787 "cciss%d: unknown message type %d\n", ctlr, cmd);
1788 return IO_ERROR;
1789 }
1790 } else {
1791 printk(KERN_WARNING
1792 "cciss%d: unknown command type %d\n", ctlr, cmd_type);
1793 return IO_ERROR;
1794 }
1795 /* Fill in the scatter gather information */
1796 if (size > 0) {
1797 buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
1798 buff, size,
1799 PCI_DMA_BIDIRECTIONAL);
1800 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
1801 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
1802 c->SG[0].Len = size;
1803 c->SG[0].Ext = 0; /* we are not chaining */
1804 }
1805 return status;
1806}
1807
1808static int sendcmd_withirq(__u8 cmd,
1809 int ctlr,
1810 void *buff,
1811 size_t size,
1812 unsigned int use_unit_num,
1813 unsigned int log_unit, __u8 page_code, int cmd_type)
1814{
1815 ctlr_info_t *h = hba[ctlr];
1816 CommandList_struct *c;
1817 u64bit buff_dma_handle;
1818 unsigned long flags;
1819 int return_status;
1820 DECLARE_COMPLETION_ONSTACK(wait);
1821
1822 if ((c = cmd_alloc(h, 0)) == NULL)
1823 return -ENOMEM;
1824 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1825 log_unit, page_code, NULL, cmd_type);
1826 if (return_status != IO_OK) {
1827 cmd_free(h, c, 0);
1828 return return_status;
1829 }
1830 resend_cmd2:
1831 c->waiting = &wait;
1832
1833 /* Put the request on the tail of the queue and send it */
1834 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1835 addQ(&h->reqQ, c);
1836 h->Qdepth++;
1837 start_io(h);
1838 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1839
1840 wait_for_completion(&wait);
1841
1842 if (c->err_info->CommandStatus != 0) { /* an error has occurred */
1843 switch (c->err_info->CommandStatus) {
1844 case CMD_TARGET_STATUS:
1845 printk(KERN_WARNING "cciss: cmd %p has "
1846 " completed with errors\n", c);
1847 if (c->err_info->ScsiStatus) {
1848 printk(KERN_WARNING "cciss: cmd %p "
1849 "has SCSI Status = %x\n",
1850 c, c->err_info->ScsiStatus);
1851 }
1852
1853 break;
1854 case CMD_DATA_UNDERRUN:
1855 case CMD_DATA_OVERRUN:
1856 /* expected for inquiry and report lun commands */
1857 break;
1858 case CMD_INVALID:
1859 printk(KERN_WARNING "cciss: Cmd %p is "
1860 "reported invalid\n", c);
1861 return_status = IO_ERROR;
1862 break;
1863 case CMD_PROTOCOL_ERR:
1864 printk(KERN_WARNING "cciss: cmd %p has "
1865 "protocol error \n", c);
1866 return_status = IO_ERROR;
1867 break;
1868 case CMD_HARDWARE_ERR:
1869 printk(KERN_WARNING "cciss: cmd %p had "
1870 " hardware error\n", c);
1871 return_status = IO_ERROR;
1872 break;
1873 case CMD_CONNECTION_LOST:
1874 printk(KERN_WARNING "cciss: cmd %p had "
1875 "connection lost\n", c);
1876 return_status = IO_ERROR;
1877 break;
1878 case CMD_ABORTED:
1879 printk(KERN_WARNING "cciss: cmd %p was "
1880 "aborted\n", c);
1881 return_status = IO_ERROR;
1882 break;
1883 case CMD_ABORT_FAILED:
1884 printk(KERN_WARNING "cciss: cmd %p reports "
1885 "abort failed\n", c);
1886 return_status = IO_ERROR;
1887 break;
1888 case CMD_UNSOLICITED_ABORT:
1889 printk(KERN_WARNING
1890 "cciss%d: unsolicited abort %p\n", ctlr, c);
1891 if (c->retry_count < MAX_CMD_RETRIES) {
1892 printk(KERN_WARNING
1893 "cciss%d: retrying %p\n", ctlr, c);
1894 c->retry_count++;
1895 /* erase the old error information */
1896 memset(c->err_info, 0,
1897 sizeof(ErrorInfo_struct));
1898 return_status = IO_OK;
1899 INIT_COMPLETION(wait);
1900 goto resend_cmd2;
1901 }
1902 return_status = IO_ERROR;
1903 break;
1904 default:
1905 printk(KERN_WARNING "cciss: cmd %p returned "
1906 "unknown status %x\n", c,
1907 c->err_info->CommandStatus);
1908 return_status = IO_ERROR;
1909 }
1910 }
1911 /* unlock the buffers from DMA */
1912 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
1913 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
1914 pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
1915 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
1916 cmd_free(h, c, 0);
1917 return return_status;
1918}
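/*
 * Typical caller pattern once interrupts are enabled (see, for example,
 * cciss_read_capacity() below):
 *
 *	sendcmd_withirq(CCISS_READ_CAPACITY, ctlr, buf,
 *			sizeof(ReadCapdata_struct), 1, logvol, 0, TYPE_CMD);
 *
 * use_unit_num == 1 addresses logical volume log_unit; the polled sendcmd()
 * variant further down is kept for init/shutdown paths where board
 * interrupts are still off.
 */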
1919
1920static void cciss_geometry_inquiry(int ctlr, int logvol,
1921 int withirq, sector_t total_size,
1922 unsigned int block_size,
1923 InquiryData_struct *inq_buff,
1924 drive_info_struct *drv)
1925{
1926 int return_code;
1927 unsigned long t;
1928
1929 memset(inq_buff, 0, sizeof(InquiryData_struct));
1930 if (withirq)
1931 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
1932 inq_buff, sizeof(*inq_buff), 1,
1933 logvol, 0xC1, TYPE_CMD);
1934 else
1935 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
1936 sizeof(*inq_buff), 1, logvol, 0xC1, NULL,
1937 TYPE_CMD);
1938 if (return_code == IO_OK) {
1939 if (inq_buff->data_byte[8] == 0xFF) {
1940 printk(KERN_WARNING
1941 "cciss: reading geometry failed, volume "
1942 "does not support reading geometry\n");
1943 drv->heads = 255;
1944 drv->sectors = 32; // Sectors per track
1945 drv->cylinders = total_size + 1;
1946 drv->raid_level = RAID_UNKNOWN;
1947 } else {
1948 drv->heads = inq_buff->data_byte[6];
1949 drv->sectors = inq_buff->data_byte[7];
1950 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
1951 drv->cylinders += inq_buff->data_byte[5];
1952 drv->raid_level = inq_buff->data_byte[8];
1953 }
1954 drv->block_size = block_size;
1955 drv->nr_blocks = total_size + 1;
1956 t = drv->heads * drv->sectors;
1957 if (t > 1) {
1958 sector_t real_size = total_size + 1;
1959 unsigned long rem = sector_div(real_size, t);
1960 if (rem)
1961 real_size++;
1962 drv->cylinders = real_size;
1963 }
1964 } else { /* Get geometry failed */
1965 printk(KERN_WARNING "cciss: reading geometry failed\n");
1966 }
1967 printk(KERN_INFO " heads=%d, sectors=%d, cylinders=%d\n\n",
1968 drv->heads, drv->sectors, drv->cylinders);
1969}
1970
1971static void
1972cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
1973 unsigned int *block_size)
1974{
1975 ReadCapdata_struct *buf;
1976 int return_code;
1977 buf = kmalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
1978 if (buf == NULL) {
1979 printk(KERN_WARNING "cciss: out of memory\n");
1980 return;
1981 }
1982 memset(buf, 0, sizeof(ReadCapdata_struct));
1983 if (withirq)
1984 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
1985 ctlr, buf, sizeof(ReadCapdata_struct),
1986 1, logvol, 0, TYPE_CMD);
1987 else
1988 return_code = sendcmd(CCISS_READ_CAPACITY,
1989 ctlr, buf, sizeof(ReadCapdata_struct),
1990 1, logvol, 0, NULL, TYPE_CMD);
1991 if (return_code == IO_OK) {
1992 *total_size = be32_to_cpu(*(__be32 *) buf->total_size);
1993 *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
1994 } else { /* read capacity command failed */
1995 printk(KERN_WARNING "cciss: read capacity failed\n");
1996 *total_size = 0;
1997 *block_size = BLOCK_SIZE;
1998 }
1999 if (*total_size != 0)
2000 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2001 (unsigned long long)*total_size+1, *block_size);
2002 kfree(buf);
2003 return;
2004}
2005
2006static void
2007cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size, unsigned int *block_size)
2008{
2009 ReadCapdata_struct_16 *buf;
2010 int return_code;
2011 buf = kmalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
2012 if (buf == NULL) {
2013 printk(KERN_WARNING "cciss: out of memory\n");
2014 return;
2015 }
2016 memset(buf, 0, sizeof(ReadCapdata_struct_16));
2017 if (withirq) {
2018 return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
2019 ctlr, buf, sizeof(ReadCapdata_struct_16),
2020 1, logvol, 0, TYPE_CMD);
2021 }
2022 else {
2023 return_code = sendcmd(CCISS_READ_CAPACITY_16,
2024 ctlr, buf, sizeof(ReadCapdata_struct_16),
2025 1, logvol, 0, NULL, TYPE_CMD);
2026 }
2027 if (return_code == IO_OK) {
2028 *total_size = be64_to_cpu(*(__be64 *) buf->total_size);
2029 *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
2030 } else { /* read capacity command failed */
2031 printk(KERN_WARNING "cciss: read capacity failed\n");
2032 *total_size = 0;
2033 *block_size = BLOCK_SIZE;
2034 }
2035 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2036 (unsigned long long)*total_size+1, *block_size);
2037 kfree(buf);
2038 return;
2039}
2040
2041static int cciss_revalidate(struct gendisk *disk)
2042{
2043 ctlr_info_t *h = get_host(disk);
2044 drive_info_struct *drv = get_drv(disk);
2045 int logvol;
2046 int FOUND = 0;
2047 unsigned int block_size;
2048 sector_t total_size;
2049 InquiryData_struct *inq_buff = NULL;
2050
2051 for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
2052 if (h->drv[logvol].LunID == drv->LunID) {
2053 FOUND = 1;
2054 break;
2055 }
2056 }
2057
2058 if (!FOUND)
2059 return 1;
2060
2061 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
2062 if (inq_buff == NULL) {
2063 printk(KERN_WARNING "cciss: out of memory\n");
2064 return 1;
2065 }
2066 if (h->cciss_read == CCISS_READ_10) {
2067 cciss_read_capacity(h->ctlr, logvol, 1,
2068 &total_size, &block_size);
2069 } else {
2070 cciss_read_capacity_16(h->ctlr, logvol, 1,
2071 &total_size, &block_size);
2072 }
2073 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
2074 inq_buff, drv);
2075
2076 blk_queue_hardsect_size(drv->queue, drv->block_size);
2077 set_capacity(disk, drv->nr_blocks);
2078
2079 kfree(inq_buff);
2080 return 0;
2081}
2082
2083/*
2084 * Wait polling for a command to complete.
2085 * The memory mapped FIFO is polled for the completion.
2086 * Used only at init time, interrupts from the HBA are disabled.
2087 */
2088static unsigned long pollcomplete(int ctlr)
2089{
2090 unsigned long done;
2091 int i;
2092
2093 /* Wait (up to 20 seconds) for a command to complete */
2094
2095 for (i = 20 * HZ; i > 0; i--) {
2096 done = hba[ctlr]->access.command_completed(hba[ctlr]);
2097 if (done == FIFO_EMPTY)
2098 schedule_timeout_uninterruptible(1);
2099 else
2100 return done;
2101 }
2102 /* Invalid address to tell caller we ran out of time */
2103 return 1;
2104}
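/*
 * The value 1 works as a "timed out" sentinel because real completions are
 * command bus addresses whose low bits are reserved for status/lookup flags
 * (see the tag handling in do_cciss_intr()), so 1 is not expected as a
 * valid completion value.
 */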
2105
2106static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
2107{
2108 /* We get in here if sendcmd() is polling for completions
2109 and gets some command back that it wasn't expecting --
2110 something other than that which it just sent down.
2111 Ordinarily, that shouldn't happen, but it can happen when
2112 the scsi tape stuff gets into error handling mode, and
2113 starts using sendcmd() to try to abort commands and
2114 reset tape drives. In that case, sendcmd may pick up
2115 completions of commands that were sent to logical drives
2116 through the block i/o system, or cciss ioctls completing, etc.
2117 In that case, we need to save those completions for later
2118 processing by the interrupt handler.
2119 */
2120
2121#ifdef CONFIG_CISS_SCSI_TAPE
2122 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
2123
2124 /* If it's not the scsi tape stuff doing error handling (abort */
2125 /* or reset), then we don't expect anything weird. */
2126 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
2127#endif
2128 printk(KERN_WARNING "cciss cciss%d: SendCmd "
2129 "Invalid command list address returned! (%lx)\n",
2130 ctlr, complete);
2131 /* not much we can do. */
2132#ifdef CONFIG_CISS_SCSI_TAPE
2133 return 1;
2134 }
2135
2136 /* We've sent down an abort or reset, but something else
2137 has completed */
2138 if (srl->ncompletions >= (hba[ctlr]->nr_cmds + 2)) {
2139 /* Uh oh. No room to save it for later... */
2140 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
2141 "reject list overflow, command lost!\n", ctlr);
2142 return 1;
2143 }
2144 /* Save it for later */
2145 srl->complete[srl->ncompletions] = complete;
2146 srl->ncompletions++;
2147#endif
2148 return 0;
2149}
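/*
 * Completions saved here are drained later in two places: at the end of
 * sendcmd(), which calls do_cciss_intr() directly when
 * scsi_rejects.ncompletions > 0, and in get_next_completion(), which hands
 * back saved entries before reading new completions from the hardware FIFO.
 */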
2150
2151/*
2152 * Send a command to the controller, and wait for it to complete.
2153 * Only used at init time.
2154 */
2155static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
2156 1: address logical volume log_unit,
2157 2: periph device address is scsi3addr */
2158 unsigned int log_unit,
2159 __u8 page_code, unsigned char *scsi3addr, int cmd_type)
2160{
2161 CommandList_struct *c;
2162 int i;
2163 unsigned long complete;
2164 ctlr_info_t *info_p = hba[ctlr];
2165 u64bit buff_dma_handle;
2166 int status, done = 0;
2167
2168 if ((c = cmd_alloc(info_p, 1)) == NULL) {
2169 printk(KERN_WARNING "cciss: unable to get memory");
2170 return IO_ERROR;
2171 }
2172 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
2173 log_unit, page_code, scsi3addr, cmd_type);
2174 if (status != IO_OK) {
2175 cmd_free(info_p, c, 1);
2176 return status;
2177 }
2178 resend_cmd1:
2179 /*
2180 * Disable interrupt
2181 */
2182#ifdef CCISS_DEBUG
2183 printk(KERN_DEBUG "cciss: turning intr off\n");
2184#endif /* CCISS_DEBUG */
2185 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
2186
2187 /* Make sure there is room in the command FIFO */
2188 /* Actually it should be completely empty at this time */
2189 /* unless we are in here doing error handling for the scsi */
2190 /* tape side of the driver. */
2191 for (i = 200000; i > 0; i--) {
2192 /* if fifo isn't full go */
2193 if (!(info_p->access.fifo_full(info_p))) {
2194
2195 break;
2196 }
2197 udelay(10);
2198 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
2199 " waiting!\n", ctlr);
2200 }
2201 /*
2202 * Send the cmd
2203 */
2204 info_p->access.submit_command(info_p, c);
2205 done = 0;
2206 do {
2207 complete = pollcomplete(ctlr);
2208
2209#ifdef CCISS_DEBUG
2210 printk(KERN_DEBUG "cciss: command completed\n");
2211#endif /* CCISS_DEBUG */
2212
2213 if (complete == 1) {
2214 printk(KERN_WARNING
2215 "cciss cciss%d: SendCmd Timed out, "
2216 "No command list address returned!\n", ctlr);
2217 status = IO_ERROR;
2218 done = 1;
2219 break;
2220 }
2221
2222 /* This will need to change for direct lookup completions */
2223 if ((complete & CISS_ERROR_BIT)
2224 && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
2225 /* if data overrun or underrun on Report command
2226 ignore it
2227 */
2228 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2229 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2230 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2231 ((c->err_info->CommandStatus ==
2232 CMD_DATA_OVERRUN) ||
2233 (c->err_info->CommandStatus == CMD_DATA_UNDERRUN)
2234 )) {
2235 complete = c->busaddr;
2236 } else {
2237 if (c->err_info->CommandStatus ==
2238 CMD_UNSOLICITED_ABORT) {
2239 printk(KERN_WARNING "cciss%d: "
2240 "unsolicited abort %p\n",
2241 ctlr, c);
2242 if (c->retry_count < MAX_CMD_RETRIES) {
2243 printk(KERN_WARNING
2244 "cciss%d: retrying %p\n",
2245 ctlr, c);
2246 c->retry_count++;
2247 /* erase the old error */
2248 /* information */
2249 memset(c->err_info, 0,
2250 sizeof
2251 (ErrorInfo_struct));
2252 goto resend_cmd1;
2253 } else {
2254 printk(KERN_WARNING
2255 "cciss%d: retried %p too "
2256 "many times\n", ctlr, c);
2257 status = IO_ERROR;
2258 goto cleanup1;
2259 }
2260 } else if (c->err_info->CommandStatus ==
2261 CMD_UNABORTABLE) {
2262 printk(KERN_WARNING
2263 "cciss%d: command could not be aborted.\n",
2264 ctlr);
2265 status = IO_ERROR;
2266 goto cleanup1;
2267 }
2268 printk(KERN_WARNING "cciss cciss%d: sendcmd"
2269 " Error %x\n", ctlr,
2270 c->err_info->CommandStatus);
2271 printk(KERN_WARNING "cciss cciss%d: sendcmd"
2272 " offensive info\n"
2273 " size %x\n num %x value %x\n",
2274 ctlr,
2275 c->err_info->MoreErrInfo.Invalid_Cmd.
2276 offense_size,
2277 c->err_info->MoreErrInfo.Invalid_Cmd.
2278 offense_num,
2279 c->err_info->MoreErrInfo.Invalid_Cmd.
2280 offense_value);
2281 status = IO_ERROR;
2282 goto cleanup1;
2283 }
2284 }
2285 /* This will need changing for direct lookup completions */
2286 if (complete != c->busaddr) {
2287 if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
2288 BUG(); /* we are pretty much hosed if we get here. */
2289 }
2290 continue;
2291 } else
2292 done = 1;
2293 } while (!done);
2294
2295 cleanup1:
2296 /* unlock the data buffer from DMA */
2297 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2298 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2299 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
2300 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2301#ifdef CONFIG_CISS_SCSI_TAPE
2302 /* if we saved some commands for later, process them now. */
2303 if (info_p->scsi_rejects.ncompletions > 0)
2304 do_cciss_intr(0, info_p);
2305#endif
2306 cmd_free(info_p, c, 1);
2307 return status;
2308}
2309
2310/*
2311 * Map (physical) PCI mem into (virtual) kernel space
2312 */
2313static void __iomem *remap_pci_mem(ulong base, ulong size)
2314{
2315 ulong page_base = ((ulong) base) & PAGE_MASK;
2316 ulong page_offs = ((ulong) base) - page_base;
2317 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2318
2319 return page_remapped ? (page_remapped + page_offs) : NULL;
2320}
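/*
 * Illustrative example (assuming a 4 KB PAGE_SIZE; addresses are made up):
 * base 0xF7E80250 with size 0x250 gives page_base 0xF7E80000 and
 * page_offs 0x250; the page-aligned region is ioremap()ed and the offset is
 * added back so the caller gets a pointer to the original address.
 */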
2321
2322/*
2323 * Takes jobs off the Q and sends them to the hardware, then puts them on
2324 * the completion Q to wait for completion.
2325 */
2326static void start_io(ctlr_info_t *h)
2327{
2328 CommandList_struct *c;
2329
2330 while ((c = h->reqQ) != NULL) {
2331 /* can't do anything if fifo is full */
2332 if ((h->access.fifo_full(h))) {
2333 printk(KERN_WARNING "cciss: fifo full\n");
2334 break;
2335 }
2336
2337 /* Get the first entry from the Request Q */
2338 removeQ(&(h->reqQ), c);
2339 h->Qdepth--;
2340
2341 /* Tell the controller execute command */
2342 h->access.submit_command(h, c);
2343
2344 /* Put job onto the completed Q */
2345 addQ(&(h->cmpQ), c);
2346 }
2347}
2348
2349/* Assumes that CCISS_LOCK(h->ctlr) is held. */
2350/* Zeros out the error record and then resends the command back */
2351/* to the controller */
2352static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
2353{
2354 /* erase the old error information */
2355 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2356
2357 /* add it to software queue and then send it to the controller */
2358 addQ(&(h->reqQ), c);
2359 h->Qdepth++;
2360 if (h->Qdepth > h->maxQsinceinit)
2361 h->maxQsinceinit = h->Qdepth;
2362
2363 start_io(h);
2364}
2365
2366static inline int evaluate_target_status(CommandList_struct *cmd)
2367{
2368 unsigned char sense_key;
2369 int status = 0; /* 0 means bad, 1 means good. */
2370
2371 if (cmd->err_info->ScsiStatus != 0x02) { /* not check condition? */
2372 if (!blk_pc_request(cmd->rq))
2373 printk(KERN_WARNING "cciss: cmd %p "
2374 "has SCSI Status 0x%x\n",
2375 cmd, cmd->err_info->ScsiStatus);
2376 return status;
2377 }
2378
2379 /* check the sense key */
2380 sense_key = 0xf & cmd->err_info->SenseInfo[2];
2381 /* no status or recovered error */
2382 if ((sense_key == 0x0) || (sense_key == 0x1))
2383 status = 1;
2384
2385 if (!blk_pc_request(cmd->rq)) { /* Not SG_IO or similar? */
2386 if (status == 0)
2387 printk(KERN_WARNING "cciss: cmd %p has CHECK CONDITION"
2388 " sense key = 0x%x\n", cmd, sense_key);
2389 return status;
2390 }
2391
2392 /* SG_IO or similar, copy sense data back */
2393 if (cmd->rq->sense) {
2394 if (cmd->rq->sense_len > cmd->err_info->SenseLen)
2395 cmd->rq->sense_len = cmd->err_info->SenseLen;
2396 memcpy(cmd->rq->sense, cmd->err_info->SenseInfo,
2397 cmd->rq->sense_len);
2398 } else
2399 cmd->rq->sense_len = 0;
2400
2401 return status;
2402}
2403
2404/* checks the status of the job and calls complete buffers to mark all
2405 * buffers for the completed job. Note that this function does not need
2406 * to hold the hba/queue lock.
2407 */
2408static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
2409 int timeout)
2410{
2411 int status = 1;
2412 int retry_cmd = 0;
2413
2414 if (timeout)
2415 status = 0;
2416
2417 if (cmd->err_info->CommandStatus == 0) /* no error has occurred */
2418 goto after_error_processing;
2419
2420 switch (cmd->err_info->CommandStatus) {
2421 case CMD_TARGET_STATUS:
2422 status = evaluate_target_status(cmd);
2423 break;
2424 case CMD_DATA_UNDERRUN:
2425 if (blk_fs_request(cmd->rq)) {
2426 printk(KERN_WARNING "cciss: cmd %p has"
2427 " completed with data underrun "
2428 "reported\n", cmd);
2429 cmd->rq->data_len = cmd->err_info->ResidualCnt;
2430 }
2431 break;
2432 case CMD_DATA_OVERRUN:
2433 if (blk_fs_request(cmd->rq))
2434 printk(KERN_WARNING "cciss: cmd %p has"
2435 " completed with data overrun "
2436 "reported\n", cmd);
2437 break;
2438 case CMD_INVALID:
2439 printk(KERN_WARNING "cciss: cmd %p is "
2440 "reported invalid\n", cmd);
2441 status = 0;
2442 break;
2443 case CMD_PROTOCOL_ERR:
2444 printk(KERN_WARNING "cciss: cmd %p has "
2445 "protocol error \n", cmd);
2446 status = 0;
2447 break;
2448 case CMD_HARDWARE_ERR:
2449 printk(KERN_WARNING "cciss: cmd %p had "
2450 " hardware error\n", cmd);
2451 status = 0;
2452 break;
2453 case CMD_CONNECTION_LOST:
2454 printk(KERN_WARNING "cciss: cmd %p had "
2455 "connection lost\n", cmd);
2456 status = 0;
2457 break;
2458 case CMD_ABORTED:
2459 printk(KERN_WARNING "cciss: cmd %p was "
2460 "aborted\n", cmd);
2461 status = 0;
2462 break;
2463 case CMD_ABORT_FAILED:
2464 printk(KERN_WARNING "cciss: cmd %p reports "
2465 "abort failed\n", cmd);
2466 status = 0;
2467 break;
2468 case CMD_UNSOLICITED_ABORT:
2469 printk(KERN_WARNING "cciss%d: unsolicited "
2470 "abort %p\n", h->ctlr, cmd);
2471 if (cmd->retry_count < MAX_CMD_RETRIES) {
2472 retry_cmd = 1;
2473 printk(KERN_WARNING
2474 "cciss%d: retrying %p\n", h->ctlr, cmd);
2475 cmd->retry_count++;
2476 } else
2477 printk(KERN_WARNING
2478 "cciss%d: %p retried too "
2479 "many times\n", h->ctlr, cmd);
2480 status = 0;
2481 break;
2482 case CMD_TIMEOUT:
2483 printk(KERN_WARNING "cciss: cmd %p timed out\n", cmd);
2484 status = 0;
2485 break;
2486 default:
2487 printk(KERN_WARNING "cciss: cmd %p returned "
2488 "unknown status %x\n", cmd,
2489 cmd->err_info->CommandStatus);
2490 status = 0;
2491 }
2492
2493after_error_processing:
2494
2495 /* We need to return this command */
2496 if (retry_cmd) {
2497 resend_cciss_cmd(h, cmd);
2498 return;
2499 }
2500 cmd->rq->data_len = 0;
2501 cmd->rq->errors = status;
2502 cmd->rq->completion_data = cmd;
2503 blk_add_trace_rq(cmd->rq->q, cmd->rq, BLK_TA_COMPLETE);
2504 blk_complete_request(cmd->rq);
2505}
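/*
 * blk_complete_request() defers final processing to softirq context; the
 * queue's softirq handler (cciss_softirq_done(), registered via
 * blk_queue_softirq_done() in cciss_init_one()) retrieves the command again
 * through rq->completion_data set above.
 */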
2506
2507/*
2508 * Get a request and submit it to the controller.
2509 */
2510static void do_cciss_request(request_queue_t *q)
2511{
2512 ctlr_info_t *h = q->queuedata;
2513 CommandList_struct *c;
2514 sector_t start_blk;
2515 int seg;
2516 struct request *creq;
2517 u64bit temp64;
2518 struct scatterlist tmp_sg[MAXSGENTRIES];
2519 drive_info_struct *drv;
2520 int i, dir;
2521
2522 /* We call start_io here in case there is a command waiting on the
2523 * queue that has not been sent.
2524 */
2525 if (blk_queue_plugged(q))
2526 goto startio;
2527
2528 queue:
2529 creq = elv_next_request(q);
2530 if (!creq)
2531 goto startio;
2532
2533 BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);
2534
2535 if ((c = cmd_alloc(h, 1)) == NULL)
2536 goto full;
2537
2538 blkdev_dequeue_request(creq);
2539
2540 spin_unlock_irq(q->queue_lock);
2541
2542 c->cmd_type = CMD_RWREQ;
2543 c->rq = creq;
2544
2545 /* fill in the request */
2546 drv = creq->rq_disk->private_data;
2547 c->Header.ReplyQueue = 0; // unused in simple mode
2548 /* got command from pool, so use the command block index instead */
2549 /* for direct lookups. */
2550 /* The first 2 bits are reserved for controller error reporting. */
2551 c->Header.Tag.lower = (c->cmdindex << 3);
2552 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
2553 c->Header.LUN.LogDev.VolId = drv->LunID;
2554 c->Header.LUN.LogDev.Mode = 1;
2555 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
2556 c->Request.Type.Type = TYPE_CMD; // It is a command.
2557 c->Request.Type.Attribute = ATTR_SIMPLE;
2558 c->Request.Type.Direction =
2559 (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
2560 c->Request.Timeout = 0; // Don't time out
2561 c->Request.CDB[0] =
2562 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
2563 start_blk = creq->sector;
2564#ifdef CCISS_DEBUG
2565 printk(KERN_DEBUG "cciss: sector=%d nr_sectors=%d\n", (int)creq->sector,
2566 (int)creq->nr_sectors);
2567#endif /* CCISS_DEBUG */
2568
2569 seg = blk_rq_map_sg(q, creq, tmp_sg);
2570
2571 /* get the DMA records for the setup */
2572 if (c->Request.Type.Direction == XFER_READ)
2573 dir = PCI_DMA_FROMDEVICE;
2574 else
2575 dir = PCI_DMA_TODEVICE;
2576
2577 for (i = 0; i < seg; i++) {
2578 c->SG[i].Len = tmp_sg[i].length;
2579 temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page,
2580 tmp_sg[i].offset,
2581 tmp_sg[i].length, dir);
2582 c->SG[i].Addr.lower = temp64.val32.lower;
2583 c->SG[i].Addr.upper = temp64.val32.upper;
2584 c->SG[i].Ext = 0; // we are not chaining
2585 }
2586 /* track how many SG entries we are using */
2587 if (seg > h->maxSG)
2588 h->maxSG = seg;
2589
2590#ifdef CCISS_DEBUG
2591 printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n",
2592 creq->nr_sectors, seg);
2593#endif /* CCISS_DEBUG */
2594
2595 c->Header.SGList = c->Header.SGTotal = seg;
2596 if (likely(blk_fs_request(creq))) {
2597 if(h->cciss_read == CCISS_READ_10) {
2598 c->Request.CDB[1] = 0;
2599 c->Request.CDB[2] = (start_blk >> 24) & 0xff; //MSB
2600 c->Request.CDB[3] = (start_blk >> 16) & 0xff;
2601 c->Request.CDB[4] = (start_blk >> 8) & 0xff;
2602 c->Request.CDB[5] = start_blk & 0xff;
2603 c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
2604 c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
2605 c->Request.CDB[8] = creq->nr_sectors & 0xff;
2606 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
2607 } else {
2608 c->Request.CDBLen = 16;
2609 c->Request.CDB[1]= 0;
2610 c->Request.CDB[2]= (start_blk >> 56) & 0xff; //MSB
2611 c->Request.CDB[3]= (start_blk >> 48) & 0xff;
2612 c->Request.CDB[4]= (start_blk >> 40) & 0xff;
2613 c->Request.CDB[5]= (start_blk >> 32) & 0xff;
2614 c->Request.CDB[6]= (start_blk >> 24) & 0xff;
2615 c->Request.CDB[7]= (start_blk >> 16) & 0xff;
2616 c->Request.CDB[8]= (start_blk >> 8) & 0xff;
2617 c->Request.CDB[9]= start_blk & 0xff;
2618 c->Request.CDB[10]= (creq->nr_sectors >> 24) & 0xff;
2619 c->Request.CDB[11]= (creq->nr_sectors >> 16) & 0xff;
2620 c->Request.CDB[12]= (creq->nr_sectors >> 8) & 0xff;
2621 c->Request.CDB[13]= creq->nr_sectors & 0xff;
2622 c->Request.CDB[14] = c->Request.CDB[15] = 0;
2623 }
2624 } else if (blk_pc_request(creq)) {
2625 c->Request.CDBLen = creq->cmd_len;
2626 memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB);
2627 } else {
2628 printk(KERN_WARNING "cciss%d: bad request type %d\n", h->ctlr, creq->cmd_type);
2629 BUG();
2630 }
2631
2632 spin_lock_irq(q->queue_lock);
2633
2634 addQ(&(h->reqQ), c);
2635 h->Qdepth++;
2636 if (h->Qdepth > h->maxQsinceinit)
2637 h->maxQsinceinit = h->Qdepth;
2638
2639 goto queue;
2640full:
2641 blk_stop_queue(q);
2642startio:
2643 /* We will already have the driver lock here, so we do not need
2644 * to take it again.
2645 */
2646 start_io(h);
2647}
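/*
 * CDB layout used above for filesystem requests: READ/WRITE(10) carries the
 * starting LBA big-endian in bytes 2-5 and the sector count in bytes 7-8,
 * while the 16-byte variant carries a 64-bit LBA in bytes 2-9 and a 32-bit
 * count in bytes 10-13; that is why >2TB logical drives are switched to
 * CCISS_READ_16/CCISS_WRITE_16 in cciss_getgeometry().
 */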
2648
2649static inline unsigned long get_next_completion(ctlr_info_t *h)
2650{
2651#ifdef CONFIG_CISS_SCSI_TAPE
2652 /* Any rejects from sendcmd() lying around? Process them first */
2653 if (h->scsi_rejects.ncompletions == 0)
2654 return h->access.command_completed(h);
2655 else {
2656 struct sendcmd_reject_list *srl;
2657 int n;
2658 srl = &h->scsi_rejects;
2659 n = --srl->ncompletions;
2660 /* printk("cciss%d: processing saved reject\n", h->ctlr); */
2661 printk("p");
2662 return srl->complete[n];
2663 }
2664#else
2665 return h->access.command_completed(h);
2666#endif
2667}
2668
2669static inline int interrupt_pending(ctlr_info_t *h)
2670{
2671#ifdef CONFIG_CISS_SCSI_TAPE
2672 return (h->access.intr_pending(h)
2673 || (h->scsi_rejects.ncompletions > 0));
2674#else
2675 return h->access.intr_pending(h);
2676#endif
2677}
2678
2679static inline long interrupt_not_for_us(ctlr_info_t *h)
2680{
2681#ifdef CONFIG_CISS_SCSI_TAPE
2682 return (((h->access.intr_pending(h) == 0) ||
2683 (h->interrupts_enabled == 0))
2684 && (h->scsi_rejects.ncompletions == 0));
2685#else
2686 return (((h->access.intr_pending(h) == 0) ||
2687 (h->interrupts_enabled == 0)));
2688#endif
2689}
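/*
 * Completion tag format handled below: commands built in do_cciss_request()
 * set bit 2 of the tag and keep the command-pool index in bits 3 and up, so
 * they can be looked up directly; anything else has its low two bits masked
 * off and is matched against bus addresses on the completion queue.
 */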
2690
2691static irqreturn_t do_cciss_intr(int irq, void *dev_id)
2692{
2693 ctlr_info_t *h = dev_id;
2694 CommandList_struct *c;
2695 unsigned long flags;
2696 __u32 a, a1, a2;
2697
2698 if (interrupt_not_for_us(h))
2699 return IRQ_NONE;
2700 /*
2701 * If there are completed commands in the completion queue,
2702 * we had better do something about it.
2703 */
2704 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2705 while (interrupt_pending(h)) {
2706 while ((a = get_next_completion(h)) != FIFO_EMPTY) {
2707 a1 = a;
2708 if ((a & 0x04)) {
2709 a2 = (a >> 3);
2710 if (a2 >= h->nr_cmds) {
2711 printk(KERN_WARNING
2712 "cciss: controller cciss%d failed, stopping.\n",
2713 h->ctlr);
2714 fail_all_cmds(h->ctlr);
2715 return IRQ_HANDLED;
2716 }
2717
2718 c = h->cmd_pool + a2;
2719 a = c->busaddr;
2720
2721 } else {
2722 a &= ~3;
2723 if ((c = h->cmpQ) == NULL) {
2724 printk(KERN_WARNING
2725 "cciss: Completion of %08x ignored\n",
2726 a1);
2727 continue;
2728 }
2729 while (c->busaddr != a) {
2730 c = c->next;
2731 if (c == h->cmpQ)
2732 break;
2733 }
2734 }
2735 /*
2736 * If we've found the command, take it off the
2737 * completion Q and free it
2738 */
2739 if (c->busaddr == a) {
2740 removeQ(&h->cmpQ, c);
2741 if (c->cmd_type == CMD_RWREQ) {
2742 complete_command(h, c, 0);
2743 } else if (c->cmd_type == CMD_IOCTL_PEND) {
2744 complete(c->waiting);
2745 }
2746# ifdef CONFIG_CISS_SCSI_TAPE
2747 else if (c->cmd_type == CMD_SCSI)
2748 complete_scsi_command(c, 0, a1);
2749# endif
2750 continue;
2751 }
2752 }
2753 }
2754
2755 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2756 return IRQ_HANDLED;
2757}
2758
2759/*
2760 * We cannot read the structure directly; for portability we must use
2761 * the I/O accessor functions.
2762 * This is for debug only.
2763 */
2764#ifdef CCISS_DEBUG
2765static void print_cfg_table(CfgTable_struct *tb)
2766{
2767 int i;
2768 char temp_name[17];
2769
2770 printk("Controller Configuration information\n");
2771 printk("------------------------------------\n");
2772 for (i = 0; i < 4; i++)
2773 temp_name[i] = readb(&(tb->Signature[i]));
2774 temp_name[4] = '\0';
2775 printk(" Signature = %s\n", temp_name);
2776 printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
2777 printk(" Transport methods supported = 0x%x\n",
2778 readl(&(tb->TransportSupport)));
2779 printk(" Transport methods active = 0x%x\n",
2780 readl(&(tb->TransportActive)));
2781 printk(" Requested transport Method = 0x%x\n",
2782 readl(&(tb->HostWrite.TransportRequest)));
2783 printk(" Coalesce Interrupt Delay = 0x%x\n",
2784 readl(&(tb->HostWrite.CoalIntDelay)));
2785 printk(" Coalesce Interrupt Count = 0x%x\n",
2786 readl(&(tb->HostWrite.CoalIntCount)));
2787 printk(" Max outstanding commands = %d\n",
2788 readl(&(tb->CmdsOutMax)));
2789 printk(" Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
2790 for (i = 0; i < 16; i++)
2791 temp_name[i] = readb(&(tb->ServerName[i]));
2792 temp_name[16] = '\0';
2793 printk(" Server Name = %s\n", temp_name);
2794 printk(" Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat)));
2795}
2796#endif /* CCISS_DEBUG */
2797
2798static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
2799{
2800 int i, offset, mem_type, bar_type;
2801 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
2802 return 0;
2803 offset = 0;
2804 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2805 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
2806 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
2807 offset += 4;
2808 else {
2809 mem_type = pci_resource_flags(pdev, i) &
2810 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
2811 switch (mem_type) {
2812 case PCI_BASE_ADDRESS_MEM_TYPE_32:
2813 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
2814 offset += 4; /* 32 bit */
2815 break;
2816 case PCI_BASE_ADDRESS_MEM_TYPE_64:
2817 offset += 8;
2818 break;
2819 default: /* reserved in PCI 2.2 */
2820 printk(KERN_WARNING
2821 "Base address is invalid\n");
2822 return -1;
2823 break;
2824 }
2825 }
2826 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
2827 return i + 1;
2828 }
2829 return -1;
2830}
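/*
 * Example (hypothetical layout): if BAR 0 is a 32-bit memory BAR it occupies
 * 4 bytes of config space, so a cfg_base_addr of PCI_BASE_ADDRESS_1
 * (offset 4 from PCI_BASE_ADDRESS_0) resolves to resource index 1 here.
 */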
2831
2832/* If MSI/MSI-X is supported by the kernel we will try to enable it on
2833 * controllers that are capable. If not, we use IO-APIC mode.
2834 */
2835
2836static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
2837 struct pci_dev *pdev, __u32 board_id)
2838{
2839#ifdef CONFIG_PCI_MSI
2840 int err;
2841 struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1},
2842 {0, 2}, {0, 3}
2843 };
2844
2845 /* Some boards advertise MSI but don't really support it */
2846 if ((board_id == 0x40700E11) ||
2847 (board_id == 0x40800E11) ||
2848 (board_id == 0x40820E11) || (board_id == 0x40830E11))
2849 goto default_int_mode;
2850
2851 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
2852 err = pci_enable_msix(pdev, cciss_msix_entries, 4);
2853 if (!err) {
2854 c->intr[0] = cciss_msix_entries[0].vector;
2855 c->intr[1] = cciss_msix_entries[1].vector;
2856 c->intr[2] = cciss_msix_entries[2].vector;
2857 c->intr[3] = cciss_msix_entries[3].vector;
2858 c->msix_vector = 1;
2859 return;
2860 }
2861 if (err > 0) {
2862 printk(KERN_WARNING "cciss: only %d MSI-X vectors "
2863 "available\n", err);
2864 goto default_int_mode;
2865 } else {
2866 printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
2867 err);
2868 goto default_int_mode;
2869 }
2870 }
2871 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
2872 if (!pci_enable_msi(pdev)) {
2873 c->msi_vector = 1;
2874 } else {
2875 printk(KERN_WARNING "cciss: MSI init failed\n");
2876 }
2877 }
2878default_int_mode:
2879#endif /* CONFIG_PCI_MSI */
2880 /* if we get here we're going to use the default interrupt mode */
2881 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2882 return;
2883}
2884
2885static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2886{
2887 ushort subsystem_vendor_id, subsystem_device_id, command;
2888 __u32 board_id, scratchpad = 0;
2889 __u64 cfg_offset;
2890 __u32 cfg_base_addr;
2891 __u64 cfg_base_addr_index;
2892 int i, err;
2893
2894 /* check to see if controller has been disabled */
2895 /* BEFORE trying to enable it */
2896 (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
2897 if (!(command & 0x02)) {
2898 printk(KERN_WARNING
2899 "cciss: controller appears to be disabled\n");
2900 return -ENODEV;
2901 }
2902
2903 err = pci_enable_device(pdev);
2904 if (err) {
2905 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
2906 return err;
2907 }
2908
2909 err = pci_request_regions(pdev, "cciss");
2910 if (err) {
2911 printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
2912 "aborting\n");
2913 return err;
2914 }
2915
2916 subsystem_vendor_id = pdev->subsystem_vendor;
2917 subsystem_device_id = pdev->subsystem_device;
2918 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
2919 subsystem_vendor_id);
2920
2921#ifdef CCISS_DEBUG
2922 printk("command = %x\n", command);
2923 printk("irq = %x\n", pdev->irq);
2924 printk("board_id = %x\n", board_id);
2925#endif /* CCISS_DEBUG */
2926
2927/* If the kernel supports MSI/MSI-X we will try to enable that functionality,
2928 * else we use the IO-APIC interrupt assigned to us by system ROM.
2929 */
2930 cciss_interrupt_mode(c, pdev, board_id);
2931
2932 /*
2933 * Memory base addr is the first addr; the second points to the config
2934 * table
2935 */
2936
2937 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
2938#ifdef CCISS_DEBUG
2939 printk("address 0 = %x\n", c->paddr);
2940#endif /* CCISS_DEBUG */
2941 c->vaddr = remap_pci_mem(c->paddr, 0x250);
2942
2943 /* Wait for the board to become ready. (PCI hotplug needs this.)
2944 * We poll for up to 120 secs, once per 100ms. */
2945 for (i = 0; i < 1200; i++) {
2946 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
2947 if (scratchpad == CCISS_FIRMWARE_READY)
2948 break;
2949 set_current_state(TASK_INTERRUPTIBLE);
2950 schedule_timeout(HZ / 10); /* wait 100ms */
2951 }
2952 if (scratchpad != CCISS_FIRMWARE_READY) {
2953 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
2954 err = -ENODEV;
2955 goto err_out_free_res;
2956 }
2957
2958 /* get the address index number */
2959 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
2960 cfg_base_addr &= (__u32) 0x0000ffff;
2961#ifdef CCISS_DEBUG
2962 printk("cfg base address = %x\n", cfg_base_addr);
2963#endif /* CCISS_DEBUG */
2964 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
2965#ifdef CCISS_DEBUG
2966 printk("cfg base address index = %x\n", cfg_base_addr_index);
2967#endif /* CCISS_DEBUG */
2968 if (cfg_base_addr_index == -1) {
2969 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
2970 err = -ENODEV;
2971 goto err_out_free_res;
2972 }
2973
2974 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
2975#ifdef CCISS_DEBUG
2976 printk("cfg offset = %x\n", cfg_offset);
2977#endif /* CCISS_DEBUG */
2978 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
2979 cfg_base_addr_index) +
2980 cfg_offset, sizeof(CfgTable_struct));
2981 c->board_id = board_id;
2982
2983#ifdef CCISS_DEBUG
2984 print_cfg_table(c->cfgtable);
2985#endif /* CCISS_DEBUG */
2986
2987 for (i = 0; i < ARRAY_SIZE(products); i++) {
2988 if (board_id == products[i].board_id) {
2989 c->product_name = products[i].product_name;
2990 c->access = *(products[i].access);
2991 c->nr_cmds = products[i].nr_cmds;
2992 break;
2993 }
2994 }
2995 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
2996 (readb(&c->cfgtable->Signature[1]) != 'I') ||
2997 (readb(&c->cfgtable->Signature[2]) != 'S') ||
2998 (readb(&c->cfgtable->Signature[3]) != 'S')) {
2999 printk("Does not appear to be a valid CISS config table\n");
3000 err = -ENODEV;
3001 goto err_out_free_res;
3002 }
3003 /* We didn't find the controller in our list. We know the
3004 * signature is valid. If it's an HP device let's try to
3005 * bind to the device and fire it up. Otherwise we bail.
3006 */
3007 if (i == ARRAY_SIZE(products)) {
3008 if (subsystem_vendor_id == PCI_VENDOR_ID_HP) {
3009 c->product_name = products[i-1].product_name;
3010 c->access = *(products[i-1].access);
3011 c->nr_cmds = products[i-1].nr_cmds;
3012 printk(KERN_WARNING "cciss: This is an unknown "
3013 "Smart Array controller.\n"
3014 "cciss: Please update to the latest driver "
3015 "available from www.hp.com.\n");
3016 } else {
3017 printk(KERN_WARNING "cciss: Sorry, I don't know how"
3018 " to access the Smart Array controller %08lx\n"
3019 , (unsigned long)board_id);
3020 err = -ENODEV;
3021 goto err_out_free_res;
3022 }
3023 }
3024#ifdef CONFIG_X86
3025 {
3026 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
3027 __u32 prefetch;
3028 prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
3029 prefetch |= 0x100;
3030 writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
3031 }
3032#endif
3033
3034 /* Disabling DMA prefetch for the P600
3035 * An ASIC bug may result in a prefetch beyond
3036 * physical memory.
3037 */
3038 if(board_id == 0x3225103C) {
3039 __u32 dma_prefetch;
3040 dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG);
3041 dma_prefetch |= 0x8000;
3042 writel(dma_prefetch, c->vaddr + I2O_DMA1_CFG);
3043 }
3044
3045#ifdef CCISS_DEBUG
3046 printk("Trying to put board into Simple mode\n");
3047#endif /* CCISS_DEBUG */
3048 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
3049 /* Update the field, and then ring the doorbell */
3050 writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest));
3051 writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
3052
3053 /* under certain very rare conditions, this can take a while.
3054 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
3055 * as we enter this code.) */
3056 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
3057 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
3058 break;
3059 /* delay and try again */
3060 set_current_state(TASK_INTERRUPTIBLE);
3061 schedule_timeout(10);
3062 }
3063
3064#ifdef CCISS_DEBUG
3065 printk(KERN_DEBUG "I counter got to %d %x\n", i,
3066 readl(c->vaddr + SA5_DOORBELL));
3067#endif /* CCISS_DEBUG */
3068#ifdef CCISS_DEBUG
3069 print_cfg_table(c->cfgtable);
3070#endif /* CCISS_DEBUG */
3071
3072 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3073 printk(KERN_WARNING "cciss: unable to get board into"
3074 " simple mode\n");
3075 err = -ENODEV;
3076 goto err_out_free_res;
3077 }
3078 return 0;
3079
3080err_out_free_res:
3081 /*
3082 * Deliberately omit pci_disable_device(): it does something nasty to
3083 * Smart Array controllers that pci_enable_device does not undo
3084 */
3085 pci_release_regions(pdev);
3086 return err;
3087}
3088
3089/*
3090 * Gets information about the local volumes attached to the controller.
3091 */
3092static void cciss_getgeometry(int cntl_num)
3093{
3094 ReportLunData_struct *ld_buff;
3095 InquiryData_struct *inq_buff;
3096 int return_code;
3097 int i;
3098 int listlength = 0;
3099 __u32 lunid = 0;
3100 int block_size;
3101 sector_t total_size;
3102
3103 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
3104 if (ld_buff == NULL) {
3105 printk(KERN_ERR "cciss: out of memory\n");
3106 return;
3107 }
3108 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
3109 if (inq_buff == NULL) {
3110 printk(KERN_ERR "cciss: out of memory\n");
3111 kfree(ld_buff);
3112 return;
3113 }
3114 /* Get the firmware version */
3115 return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
3116 sizeof(InquiryData_struct), 0, 0, 0, NULL,
3117 TYPE_CMD);
3118 if (return_code == IO_OK) {
3119 hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
3120 hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
3121 hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
3122 hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
3123 } else { /* send command failed */
3124
3125 printk(KERN_WARNING "cciss: unable to determine firmware"
3126 " version of controller\n");
3127 }
3128 /* Get the number of logical volumes */
3129 return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
3130 sizeof(ReportLunData_struct), 0, 0, 0, NULL,
3131 TYPE_CMD);
3132
3133 if (return_code == IO_OK) {
3134#ifdef CCISS_DEBUG
3135 printk("LUN Data\n--------------------------\n");
3136#endif /* CCISS_DEBUG */
3137
3138 listlength |=
3139 (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
3140 listlength |=
3141 (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
3142 listlength |=
3143 (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
3144 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
3145 } else { /* reading number of logical volumes failed */
3146
3147 printk(KERN_WARNING "cciss: report logical volume"
3148 " command failed\n");
3149 listlength = 0;
3150 }
3151 hba[cntl_num]->num_luns = listlength / 8; // 8 bytes per entry
3152 if (hba[cntl_num]->num_luns > CISS_MAX_LUN) {
3153 printk(KERN_ERR
3154 "cciss: only %d logical volumes supported\n",
3155 CISS_MAX_LUN);
3156 hba[cntl_num]->num_luns = CISS_MAX_LUN;
3157 }
3158#ifdef CCISS_DEBUG
3159 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n",
3160 ld_buff->LUNListLength[0], ld_buff->LUNListLength[1],
3161 ld_buff->LUNListLength[2], ld_buff->LUNListLength[3],
3162 hba[cntl_num]->num_luns);
3163#endif /* CCISS_DEBUG */
3164
3165 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns - 1;
3166 for (i = 0; i < CISS_MAX_LUN; i++) {
3167 if (i < hba[cntl_num]->num_luns) {
3168 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3]))
3169 << 24;
3170 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2]))
3171 << 16;
3172 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1]))
3173 << 8;
3174 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
3175
3176 hba[cntl_num]->drv[i].LunID = lunid;
3177
3178#ifdef CCISS_DEBUG
3179 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
3180 ld_buff->LUN[i][0], ld_buff->LUN[i][1],
3181 ld_buff->LUN[i][2], ld_buff->LUN[i][3],
3182 hba[cntl_num]->drv[i].LunID);
3183#endif /* CCISS_DEBUG */
3184
3185 /* testing to see if 16-byte CDBs are already being used */
3186 if(hba[cntl_num]->cciss_read == CCISS_READ_16) {
3187 cciss_read_capacity_16(cntl_num, i, 0,
3188 &total_size, &block_size);
3189 goto geo_inq;
3190 }
3191 cciss_read_capacity(cntl_num, i, 0, &total_size, &block_size);
3192
3193 /* If read_capacity returns all F's, the logical drive is >2TB */
3194 /* so we switch to 16-byte CDBs for all read/write ops */
3195 if(total_size == 0xFFFFFFFFULL) {
3196 cciss_read_capacity_16(cntl_num, i, 0,
3197 &total_size, &block_size);
3198 hba[cntl_num]->cciss_read = CCISS_READ_16;
3199 hba[cntl_num]->cciss_write = CCISS_WRITE_16;
3200 } else {
3201 hba[cntl_num]->cciss_read = CCISS_READ_10;
3202 hba[cntl_num]->cciss_write = CCISS_WRITE_10;
3203 }
3204geo_inq:
3205 cciss_geometry_inquiry(cntl_num, i, 0, total_size,
3206 block_size, inq_buff,
3207 &hba[cntl_num]->drv[i]);
3208 } else {
3209 /* initialize raid_level to indicate a free space */
3210 hba[cntl_num]->drv[i].raid_level = -1;
3211 }
3212 }
3213 kfree(ld_buff);
3214 kfree(inq_buff);
3215}
3216
3217/* Function to find the first free pointer into our hba[] array */
3218/* Returns -1 if no free entries are left. */
3219static int alloc_cciss_hba(void)
3220{
3221 int i;
3222
3223 for (i = 0; i < MAX_CTLR; i++) {
3224 if (!hba[i]) {
3225 ctlr_info_t *p;
3226 p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
3227 if (!p)
3228 goto Enomem;
3229 p->gendisk[0] = alloc_disk(1 << NWD_SHIFT);
3230 if (!p->gendisk[0])
3231 goto Enomem;
3232 hba[i] = p;
3233 return i;
3234 }
3235 }
3236 printk(KERN_WARNING "cciss: This driver supports a maximum"
3237 " of %d controllers.\n", MAX_CTLR);
3238 return -1;
3239Enomem:
3240 printk(KERN_ERR "cciss: out of memory.\n");
3241 return -1;
3242}
3243
3244static void free_hba(int i)
3245{
3246 ctlr_info_t *p = hba[i];
3247 int n;
3248
3249 hba[i] = NULL;
3250 for (n = 0; n < CISS_MAX_LUN; n++)
3251 put_disk(p->gendisk[n]);
3252 kfree(p);
3253}
3254
3255/*
3256 * This is it. Find all the controllers and register them. I really hate
3257 * stealing all these major device numbers.
3258 * returns the number of block devices registered.
3259 */
3260static int __devinit cciss_init_one(struct pci_dev *pdev,
3261 const struct pci_device_id *ent)
3262{
3263 int i;
3264 int j = 0;
3265 int rc;
3266 int dac;
3267
3268 i = alloc_cciss_hba();
3269 if (i < 0)
3270 return -1;
3271
3272 hba[i]->busy_initializing = 1;
3273
3274 if (cciss_pci_init(hba[i], pdev) != 0)
3275 goto clean1;
3276
3277 sprintf(hba[i]->devname, "cciss%d", i);
3278 hba[i]->ctlr = i;
3279 hba[i]->pdev = pdev;
3280
3281 /* configure PCI DMA stuff */
3282 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
3283 dac = 1;
3284 else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
3285 dac = 0;
3286 else {
3287 printk(KERN_ERR "cciss: no suitable DMA available\n");
3288 goto clean1;
3289 }
3290
3291 /*
3292 * register with the major number, or get a dynamic major number
3293 * by passing 0 as argument. This is done for greater than
3294 * 8 controller support.
3295 */
3296 if (i < MAX_CTLR_ORIG)
3297 hba[i]->major = COMPAQ_CISS_MAJOR + i;
3298 rc = register_blkdev(hba[i]->major, hba[i]->devname);
3299 if (rc == -EBUSY || rc == -EINVAL) {
3300 printk(KERN_ERR
3301 "cciss: Unable to get major number %d for %s "
3302 "on hba %d\n", hba[i]->major, hba[i]->devname, i);
3303 goto clean1;
3304 } else {
3305 if (i >= MAX_CTLR_ORIG)
3306 hba[i]->major = rc;
3307 }
3308
3309 /* make sure the board interrupts are off */
3310 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
3311 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
3312 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
3313 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
3314 hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
3315 goto clean2;
3316 }
3317
3318 printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
3319 hba[i]->devname, pdev->device, pci_name(pdev),
3320 hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
3321
3322 hba[i]->cmd_pool_bits =
3323 kmalloc(((hba[i]->nr_cmds + BITS_PER_LONG -
3324 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
3325 hba[i]->cmd_pool = (CommandList_struct *)
3326 pci_alloc_consistent(hba[i]->pdev,
3327 hba[i]->nr_cmds * sizeof(CommandList_struct),
3328 &(hba[i]->cmd_pool_dhandle));
3329 hba[i]->errinfo_pool = (ErrorInfo_struct *)
3330 pci_alloc_consistent(hba[i]->pdev,
3331 hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3332 &(hba[i]->errinfo_pool_dhandle));
3333 if ((hba[i]->cmd_pool_bits == NULL)
3334 || (hba[i]->cmd_pool == NULL)
3335 || (hba[i]->errinfo_pool == NULL)) {
3336 printk(KERN_ERR "cciss: out of memory");
3337 goto clean4;
3338 }
3339#ifdef CONFIG_CISS_SCSI_TAPE
3340 hba[i]->scsi_rejects.complete =
3341 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
3342 (hba[i]->nr_cmds + 5), GFP_KERNEL);
3343 if (hba[i]->scsi_rejects.complete == NULL) {
3344 printk(KERN_ERR "cciss: out of memory");
3345 goto clean4;
3346 }
3347#endif
3348 spin_lock_init(&hba[i]->lock);
3349
3350 /* Initialize the pdev driver private data.
3351 have it point to hba[i]. */
3352 pci_set_drvdata(pdev, hba[i]);
3353 /* command and error info recs zeroed out before
3354 they are used */
3355 memset(hba[i]->cmd_pool_bits, 0,
3356 ((hba[i]->nr_cmds + BITS_PER_LONG -
3357 1) / BITS_PER_LONG) * sizeof(unsigned long));
3358
3359#ifdef CCISS_DEBUG
3360 printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n", i);
3361#endif /* CCISS_DEBUG */
3362
3363 cciss_getgeometry(i);
3364
3365 cciss_scsi_setup(i);
3366
3367 /* Turn the interrupts on so we can service requests */
3368 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
3369
3370 cciss_procinit(i);
3371
3372 hba[i]->cciss_max_sectors = 2048;
3373
3374 hba[i]->busy_initializing = 0;
3375
3376 do {
3377 drive_info_struct *drv = &(hba[i]->drv[j]);
3378 struct gendisk *disk = hba[i]->gendisk[j];
3379 request_queue_t *q;
3380
3381 /* Check if the disk was allocated already */
3382 if (!disk){
3383 hba[i]->gendisk[j] = alloc_disk(1 << NWD_SHIFT);
3384 disk = hba[i]->gendisk[j];
3385 }
3386
3387 /* Check that the disk was able to be allocated */
3388 if (!disk) {
3389 printk(KERN_ERR "cciss: unable to allocate memory for disk %d\n", j);
3390 goto clean4;
3391 }
3392
3393 q = blk_init_queue(do_cciss_request, &hba[i]->lock);
3394 if (!q) {
3395 printk(KERN_ERR
3396 "cciss: unable to allocate queue for disk %d\n",
3397 j);
3398 goto clean4;
3399 }
3400 drv->queue = q;
3401
3402 q->backing_dev_info.ra_pages = READ_AHEAD;
3403 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
3404
3405 /* This is a hardware imposed limit. */
3406 blk_queue_max_hw_segments(q, MAXSGENTRIES);
3407
3408 /* This is a limit in the driver and could be eliminated. */
3409 blk_queue_max_phys_segments(q, MAXSGENTRIES);
3410
3411 blk_queue_max_sectors(q, hba[i]->cciss_max_sectors);
3412
3413 blk_queue_softirq_done(q, cciss_softirq_done);
3414
3415 q->queuedata = hba[i];
3416 sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
3417 disk->major = hba[i]->major;
3418 disk->first_minor = j << NWD_SHIFT;
3419 disk->fops = &cciss_fops;
3420 disk->queue = q;
3421 disk->private_data = drv;
3422 disk->driverfs_dev = &pdev->dev;
3423 /* we must register the controller even if no disks exist */
3424 /* this is for the online array utilities */
3425 if (!drv->heads && j)
3426 continue;
3427 blk_queue_hardsect_size(q, drv->block_size);
3428 set_capacity(disk, drv->nr_blocks);
3429 add_disk(disk);
3430 j++;
3431 } while (j <= hba[i]->highest_lun);
3432
3433 return 1;
3434
3435 clean4:
3436#ifdef CONFIG_CISS_SCSI_TAPE
3437 kfree(hba[i]->scsi_rejects.complete);
3438#endif
3439 kfree(hba[i]->cmd_pool_bits);
3440 if (hba[i]->cmd_pool)
3441 pci_free_consistent(hba[i]->pdev,
3442 hba[i]->nr_cmds * sizeof(CommandList_struct),
3443 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3444 if (hba[i]->errinfo_pool)
3445 pci_free_consistent(hba[i]->pdev,
3446 hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3447 hba[i]->errinfo_pool,
3448 hba[i]->errinfo_pool_dhandle);
3449 free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
3450 clean2:
3451 unregister_blkdev(hba[i]->major, hba[i]->devname);
3452 clean1:
3453 hba[i]->busy_initializing = 0;
3454 /* cleanup any queues that may have been initialized */
3455 for (j=0; j <= hba[i]->highest_lun; j++){
3456 drive_info_struct *drv = &(hba[i]->drv[j]);
3457 if (drv->queue)
3458 blk_cleanup_queue(drv->queue);
3459 }
3460 /*
3461 * Deliberately omit pci_disable_device(): it does something nasty to
3462 * Smart Array controllers that pci_enable_device does not undo
3463 */
3464 pci_release_regions(pdev);
3465 pci_set_drvdata(pdev, NULL);
3466 free_hba(i);
3467 return -1;
3468}
3469
3470static void cciss_remove_one(struct pci_dev *pdev)
3471{
3472 ctlr_info_t *tmp_ptr;
3473 int i, j;
3474 char flush_buf[4];
3475 int return_code;
3476
3477 if (pci_get_drvdata(pdev) == NULL) {
3478 printk(KERN_ERR "cciss: Unable to remove device \n");
3479 return;
3480 }
3481 tmp_ptr = pci_get_drvdata(pdev);
3482 i = tmp_ptr->ctlr;
3483 if (hba[i] == NULL) {
3484 printk(KERN_ERR "cciss: device appears to "
3485 "already be removed \n");
3486 return;
3487 }
3488
3489 remove_proc_entry(hba[i]->devname, proc_cciss);
3490 unregister_blkdev(hba[i]->major, hba[i]->devname);
3491
3492 /* remove it from the disk list */
3493 for (j = 0; j < CISS_MAX_LUN; j++) {
3494 struct gendisk *disk = hba[i]->gendisk[j];
3495 if (disk) {
3496 request_queue_t *q = disk->queue;
3497
3498 if (disk->flags & GENHD_FL_UP)
3499 del_gendisk(disk);
3500 if (q)
3501 blk_cleanup_queue(q);
3502 }
3503 }
3504
3505 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3506
3507 /* Turn board interrupts off and send the flush cache command */
3508 /* sendcmd will turn off interrupts and send the flush cache command
3509 * to write all data in the battery-backed cache to disk */
3510 memset(flush_buf, 0, 4);
3511 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
3512 TYPE_CMD);
3513 if (return_code == IO_OK) {
3514 printk(KERN_INFO "Completed flushing cache on controller %d\n", i);
3515 } else {
3516 printk(KERN_WARNING "Error flushing cache on controller %d\n", i);
3517 }
3518 free_irq(hba[i]->intr[2], hba[i]);
3519
3520#ifdef CONFIG_PCI_MSI
3521 if (hba[i]->msix_vector)
3522 pci_disable_msix(hba[i]->pdev);
3523 else if (hba[i]->msi_vector)
3524 pci_disable_msi(hba[i]->pdev);
3525#endif /* CONFIG_PCI_MSI */
3526
3527 iounmap(hba[i]->vaddr);
3528
3529 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct),
3530 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3531 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3532 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3533 kfree(hba[i]->cmd_pool_bits);
3534#ifdef CONFIG_CISS_SCSI_TAPE
3535 kfree(hba[i]->scsi_rejects.complete);
3536#endif
3537 /*
3538 * Deliberately omit pci_disable_device(): it does something nasty to
3539 * Smart Array controllers that pci_enable_device does not undo
3540 */
3541 pci_release_regions(pdev);
3542 pci_set_drvdata(pdev, NULL);
3543 free_hba(i);
3544}
3545
3546static struct pci_driver cciss_pci_driver = {
3547 .name = "cciss",
3548 .probe = cciss_init_one,
3549 .remove = __devexit_p(cciss_remove_one),
3550 .id_table = cciss_pci_device_id, /* id_table */
3551 .shutdown = cciss_remove_one,
3552};
3553
3554/*
3555 * This is it. Register the PCI driver information for the cards we control;
3556 * the OS will call our registered routines when it finds one of our cards.
3557 */
3558static int __init cciss_init(void)
3559{
3560 printk(KERN_INFO DRIVER_NAME "\n");
3561
3562 /* Register for our PCI devices */
3563 return pci_register_driver(&cciss_pci_driver);
3564}
3565
3566static void __exit cciss_cleanup(void)
3567{
3568 int i;
3569
3570 pci_unregister_driver(&cciss_pci_driver);
3571 /* double check that all controller entries have been removed */
3572 for (i = 0; i < MAX_CTLR; i++) {
3573 if (hba[i] != NULL) {
3574 printk(KERN_WARNING "cciss: had to remove"
3575 " controller %d\n", i);
3576 cciss_remove_one(hba[i]->pdev);
3577 }
3578 }
3579 remove_proc_entry("cciss", proc_root_driver);
3580}
3581
3582static void fail_all_cmds(unsigned long ctlr)
3583{
3584 /* If we get here, the board is apparently dead. */
3585 ctlr_info_t *h = hba[ctlr];
3586 CommandList_struct *c;
3587 unsigned long flags;
3588
3589 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
3590 h->alive = 0; /* the controller apparently died... */
3591
3592 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
3593
3594 pci_disable_device(h->pdev); /* Make sure it is really dead. */
3595
3596 /* move everything off the request queue onto the completed queue */
3597 while ((c = h->reqQ) != NULL) {
3598 removeQ(&(h->reqQ), c);
3599 h->Qdepth--;
3600 addQ(&(h->cmpQ), c);
3601 }
3602
3603 /* Now, fail everything on the completed queue with a HW error */
3604 while ((c = h->cmpQ) != NULL) {
3605 removeQ(&h->cmpQ, c);
3606 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
3607 if (c->cmd_type == CMD_RWREQ) {
3608 complete_command(h, c, 0);
3609 } else if (c->cmd_type == CMD_IOCTL_PEND)
3610 complete(c->waiting);
3611#ifdef CONFIG_CISS_SCSI_TAPE
3612 else if (c->cmd_type == CMD_SCSI)
3613 complete_scsi_command(c, 0, 0);
3614#endif
3615 }
3616 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
3617 return;
3618}
3619
3620module_init(cciss_init);
3621module_exit(cciss_cleanup);