/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2014-2015 PMC-Sierra, Inc.
 *    Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    Questions/Comments/Bugfixes to storagedev@pmcs.com
 *
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/scsi_dbg.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"
/*
 * HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.'
 * with an optional trailing '-' followed by a byte value (0-255).
 */
#define HPSA_DRIVER_VERSION "3.4.14-0"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"
/* How long to wait for CISS doorbell communication */
#define CLEAR_EVENT_WAIT_INTERVAL 20	/* ms for each msleep() call */
#define MODE_CHANGE_WAIT_INTERVAL 10	/* ms for each msleep() call */
#define MAX_CLEAR_EVENT_WAIT 30000	/* times 20 ms = 600 s */
#define MAX_MODE_CHANGE_WAIT 2000	/* times 10 ms = 20 s */
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3
/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");
static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
		"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");
/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array P244br", &SA5_access},
	{0x21BE103C, "Smart Array P741m", &SA5_access},
	{0x21BF103C, "Smart HBA H240ar", &SA5_access},
	{0x21C0103C, "Smart Array P440ar", &SA5_access},
	{0x21C1103C, "Smart Array P840ar", &SA5_access},
	{0x21C2103C, "Smart Array P440", &SA5_access},
	{0x21C3103C, "Smart Array P441", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array P841", &SA5_access},
	{0x21C6103C, "Smart HBA H244br", &SA5_access},
	{0x21C7103C, "Smart HBA H240", &SA5_access},
	{0x21C8103C, "Smart HBA H241", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array P246br", &SA5_access},
	{0x21CB103C, "Smart Array P840", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart HBA", &SA5_access},
	{0x05809005, "SmartHBA-SA", &SA5_access},
	{0x05819005, "SmartHBA-SA 8i", &SA5_access},
	{0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
	{0x05839005, "SmartHBA-SA 8e", &SA5_access},
	{0x05849005, "SmartHBA-SA 16i", &SA5_access},
	{0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
static struct scsi_transport_template *hpsa_sas_transport_template;
static int hpsa_add_sas_host(struct ctlr_info *h);
static void hpsa_delete_sas_host(struct ctlr_info *h);
static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
			struct hpsa_scsi_dev_t *device);
static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device);
static struct hpsa_scsi_dev_t
	*hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
		struct sas_rphy *rphy);

#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
static const struct scsi_cmnd hpsa_cmd_busy;
#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
static const struct scsi_cmnd hpsa_cmd_idle;
static int number_of_controllers;
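/*
 * SCSI_CMD_BUSY and SCSI_CMD_IDLE above are sentinel values stored in
 * CommandList->scsi_cmd to distinguish driver-internal commands from
 * ones owned by the SCSI midlayer: "idle" marks a reserved command
 * sitting unused in the pool (see hpsa_is_cmd_idle() below), "busy" one
 * that is currently in flight.
 */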
static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
	void __user *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
					    struct scsi_cmnd *scmd);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
static void hpsa_free_cmd_pool(struct ctlr_info *h);
#define VPD_PAGE (1 << 8)
#define HPSA_SIMPLE_ERROR_BITS 0x03
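/*
 * VPD_PAGE is OR'd into the page_code argument of fill_cmd() to request
 * an EVPD (vital product data) inquiry page rather than standard inquiry
 * data; the flag sits above the low byte, so the actual page number
 * passes through unchanged.
 */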
static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static int hpsa_slave_configure(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_free_performant_mode(struct ctlr_info *h);
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
static void hpsa_command_resubmit_worker(struct work_struct *work);
static u32 lockup_detected(struct ctlr_info *h);
static int detect_controller_lockup(struct ctlr_info *h);
static void hpsa_disable_rld_caching(struct ctlr_info *h);
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
	struct ReportExtendedLUNdata *buf, int bufsize);
static int hpsa_luns_changed(struct ctlr_info *h);
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}
static inline bool hpsa_is_cmd_idle(struct CommandList *c)
{
	return c->scsi_cmd == SCSI_CMD_IDLE;
}

static inline bool hpsa_is_pending_event(struct CommandList *c)
{
	return c->abort_pending || c->reset_pending;
}
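/*
 * A command with an abort or reset pending is not (re)submitted to the
 * controller; enqueue_cmd_and_start_io() later in this file completes
 * such commands through finish_cmd() instead.
 */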
/* extract sense key, asc, and ascq from sense data.  -1 means invalid. */
static void decode_sense_data(const u8 *sense_data, int sense_data_len,
			u8 *sense_key, u8 *asc, u8 *ascq)
{
	struct scsi_sense_hdr sshdr;
	bool rc;

	*sense_key = -1;
	*asc = -1;
	*ascq = -1;

	if (sense_data_len < 1)
		return;

	rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
	if (rc) {
		*sense_key = sshdr.sense_key;
		*asc = sshdr.asc;
		*ascq = sshdr.ascq;
	}
}
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	u8 sense_key, asc, ascq;
	int sense_len;

	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
		sense_len = sizeof(c->err_info->SenseInfo);
	else
		sense_len = c->err_info->SenseLen;

	decode_sense_data(c->err_info->SenseInfo, sense_len,
				&sense_key, &asc, &ascq);
	if (sense_key != UNIT_ATTENTION || asc == 0xff)
		return 0;

	switch (asc) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: a state change detected, command retried\n",
			h->devname);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev,
			"%s: LUN failure detected\n", h->devname);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: report LUN data changed\n", h->devname);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
	 * target (array) devices.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev,
			"%s: a power on or device reset detected\n",
			h->devname);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev,
			"%s: unit attention cleared by another initiator\n",
			h->devname);
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: unknown unit attention detected\n",
			h->devname);
		break;
	}
	return 1;
}
static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}
static u32 lockup_detected(struct ctlr_info *h);
static ssize_t host_show_lockup_detected(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ld;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	ld = lockup_detected(h);

	return sprintf(buf, "ld=%d\n", ld);
}
static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int status, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &status) != 1)
		return -EINVAL;
	h = shost_to_hba(shost);
	h->acciopath_status = !!status;
	dev_warn(&h->pdev->dev,
		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
		h->acciopath_status ? "enabled" : "disabled");
	return count;
}
static ssize_t host_store_raid_offload_debug(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int debug_level, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
		return -EINVAL;
	if (debug_level < 0)
		debug_level = 0;
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
		h->raid_offload_debug);
	return count;
}
static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}
static ssize_t host_show_firmware_revision(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}
static ssize_t host_show_commands_outstanding(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n",
			atomic_read(&h->commands_outstanding));
}
static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}
static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ?  "enabled" : "disabled");
}
/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};
/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};
static u32 needs_abort_tags_swizzled[] = {
	0x323D103C, /* Smart Array P700m */
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
};
static int board_id_in_array(u32 a[], int nelems, u32 board_id)
{
	int i;

	for (i = 0; i < nelems; i++)
		if (a[i] == board_id)
			return 1;
	return 0;
}

static int ctlr_is_hard_resettable(u32 board_id)
{
	return !board_id_in_array(unresettable_controller,
			ARRAY_SIZE(unresettable_controller), board_id);
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	return !board_id_in_array(soft_unresettable_controller,
			ARRAY_SIZE(soft_unresettable_controller), board_id);
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static int ctlr_needs_abort_tags_swizzled(u32 board_id)
{
	return board_id_in_array(needs_abort_tags_swizzled,
			ARRAY_SIZE(needs_abort_tags_swizzled), board_id);
}
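/*
 * Boards in needs_abort_tags_swizzled[] interpret the bytes of the
 * command tag in swapped order in abort messages; the abort path checks
 * ctlr_needs_abort_tags_swizzled() and reorders the tag bytes for those
 * boards before issuing the abort.
 */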
static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}
static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
	"1(+0)ADM", "UNKNOWN", "PHYS DRV"
};
#define HPSA_RAID_0	0
#define HPSA_RAID_4	1
#define HPSA_RAID_1	2	/* also used for RAID 10 */
#define HPSA_RAID_5	3	/* also used for RAID 50 */
#define HPSA_RAID_51	4
#define HPSA_RAID_6	5	/* also used for RAID 60 */
#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 2)
#define PHYSICAL_DRIVE (ARRAY_SIZE(raid_label) - 1)
static inline bool is_logical_device(struct hpsa_scsi_dev_t *device)
{
	return !device->physical_device;
}
static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_device(hdev)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}
static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}
static ssize_t unique_id_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}
static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "%d\n", offload_enabled);
}
static ssize_t path_info_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int i;
	int output_len = 0;
	u8 box;
	u8 bay;
	u8 path_map_index = 0;
	char *active;
	unsigned char phys_connector[2];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->devlock, flags);
		return -ENODEV;
	}

	bay = hdev->bay;
	for (i = 0; i < MAX_PATHS; i++) {
		path_map_index = 1<<i;
		if (i == hdev->active_path_index)
			active = "Active";
		else if (hdev->path_map & path_map_index)
			active = "Inactive";
		else
			continue;

		output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len,
				"[%d:%d:%d:%d] %20.20s ",
				h->scsi_host->host_no,
				hdev->bus, hdev->target, hdev->lun,
				scsi_device_type(hdev->devtype));

		if (hdev->devtype == TYPE_RAID || is_logical_device(hdev)) {
			output_len += scnprintf(buf + output_len,
						PAGE_SIZE - output_len,
						"%s\n", active);
			continue;
		}

		box = hdev->box[i];
		memcpy(&phys_connector, &hdev->phys_connector[i],
			sizeof(phys_connector));
		if (phys_connector[0] < '0')
			phys_connector[0] = '0';
		if (phys_connector[1] < '0')
			phys_connector[1] = '0';
		output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len,
				"PORT: %.2s ",
				phys_connector);
		if (hdev->devtype == TYPE_DISK && hdev->expose_device) {
			if (box == 0 || box == 0xFF) {
				output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"BAY: %hhu %s\n",
					bay, active);
			} else {
				output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"BOX: %hhu BAY: %hhu %s\n",
					box, bay, active);
			}
		} else if (box != 0 && box != 0xFF) {
			output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len, "BOX: %hhu %s\n",
				box, active);
		} else
			output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len, "%s\n", active);
	}

	spin_unlock_irqrestore(&h->devlock, flags);
	return output_len;
}
static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
			host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(path_info, S_IRUGO, path_info_show, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
		host_show_hp_ssd_smart_path_status,
		host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
			host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);
static DEVICE_ATTR(lockup_detected, S_IRUGO,
	host_show_lockup_detected, NULL);
static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_hp_ssd_smart_path_enabled,
	&dev_attr_path_info,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	&dev_attr_hp_ssd_smart_path_status,
	&dev_attr_raid_offload_debug,
	&dev_attr_lockup_detected,
	NULL,
};
#define HPSA_NRESERVED_CMDS	(HPSA_CMDS_RESERVED_FOR_ABORTS + \
	HPSA_CMDS_RESERVED_FOR_DRIVER + HPSA_MAX_CONCURRENT_PASSTHRUS)
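/*
 * HPSA_NRESERVED_CMDS counts the commands held out of the general pool:
 * some reserved for aborts, some for driver-initiated requests, and
 * some for concurrent pass-through ioctls, per the three macros above.
 */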
static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= HPSA,
	.proc_name		= HPSA,
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_abort_handler	= hpsa_eh_abort_handler,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_configure	= hpsa_slave_configure,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs		= hpsa_sdev_attrs,
	.shost_attrs		= hpsa_shost_attrs,
};
static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}
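/*
 * Performant-mode reply queues are rings whose entries carry a toggle
 * bit in bit 0: the controller flips the bit it writes on each pass
 * through the ring, so an entry is new exactly when its toggle bit
 * matches rq->wraparound, which the driver flips whenever current_entry
 * wraps back to 0.
 */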
/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */

/*
 * set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * table entry.
 */
#define DEFAULT_REPLY_QUEUE (-1)
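/*
 * DEFAULT_REPLY_QUEUE (-1) means "no specific reply queue requested";
 * the submit paths below then spread completions by hashing the
 * submitting CPU across h->nreply_queues.
 */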
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
					int reply_queue)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (unlikely(!h->msix_vector))
			return;
		if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
			c->Header.ReplyQueue =
				raw_smp_processor_id() % h->nreply_queues;
		else
			c->Header.ReplyQueue = reply_queue % h->nreply_queues;
	}
}
static void set_ioaccel1_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
	else
		cp->ReplyQueue = reply_queue % h->nreply_queues;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit (bit 0)
	 *  - pull count (bits 1-3)
	 *  - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
					IOACCEL1_BUSADDR_CMDTYPE;
}
static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
		&h->ioaccel2_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->reply_queue = smp_processor_id() % h->nreply_queues;
	else
		cp->reply_queue = reply_queue % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= h->ioaccel2_blockFetchTable[0];
}
static void set_ioaccel2_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->reply_queue = smp_processor_id() % h->nreply_queues;
	else
		cp->reply_queue = reply_queue % h->nreply_queues;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}
static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}
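/*
 * In BMIC (Basic Management Interface Command) CDBs, byte 0 carries the
 * BMIC read/write opcode and byte 6 the specific BMIC command, hence
 * the cdb[0]/cdb[6] test above.
 */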
/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should.  So we dial down lockup detection during firmware flash, and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
		struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
		struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}
static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c, int reply_queue)
{
	dial_down_lockup_detection_during_fw_flash(h, c);
	atomic_inc(&h->commands_outstanding);
	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	case IOACCEL2_TMF:
		set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	default:
		set_performant_mode(h, c, reply_queue);
		h->access.submit_command(h, c);
	}
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
{
	if (unlikely(hpsa_is_pending_event(c)))
		return finish_cmd(c);

	__enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
}
static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	else
		return 0;
}
static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}
static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
	struct hpsa_scsi_dev_t *dev, char *description)
{
#define LABEL_SIZE 25
	char label[LABEL_SIZE];

	if (h == NULL || h->pdev == NULL || h->scsi_host == NULL)
		return;

	switch (dev->devtype) {
	case TYPE_RAID:
		snprintf(label, LABEL_SIZE, "controller");
		break;
	case TYPE_ENCLOSURE:
		snprintf(label, LABEL_SIZE, "enclosure");
		break;
	case TYPE_DISK:
		if (dev->external)
			snprintf(label, LABEL_SIZE, "external");
		else if (!is_logical_dev_addr_mode(dev->scsi3addr))
			snprintf(label, LABEL_SIZE, "%s",
				raid_label[PHYSICAL_DRIVE]);
		else
			snprintf(label, LABEL_SIZE, "RAID-%s",
				dev->raid_level > RAID_UNKNOWN ? "?" :
				raid_label[dev->raid_level]);
		break;
	case TYPE_ROM:
		snprintf(label, LABEL_SIZE, "rom");
		break;
	case TYPE_TAPE:
		snprintf(label, LABEL_SIZE, "tape");
		break;
	case TYPE_MEDIUM_CHANGER:
		snprintf(label, LABEL_SIZE, "changer");
		break;
	default:
		snprintf(label, LABEL_SIZE, "UNKNOWN");
		break;
	}

	dev_printk(level, &h->pdev->dev,
			"scsi %d:%d:%d:%d: %s %s %.8s %.16s %s SSDSmartPathCap%c En%c Exp=%d\n",
			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
			description,
			scsi_device_type(dev->devtype),
			dev->vendor,
			dev->model,
			label,
			dev->offload_config ? '+' : '-',
			dev->offload_enabled ? '+' : '-',
			dev->expose_device);
}
/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device is a non-zero lun of a multi-lun device,
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting bytes 4 and 5.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	addr1[5] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		addr2[5] = 0;
		/* differ only in bytes 4 and 5? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;
	hpsa_show_dev_msg(KERN_INFO, h, device,
		device->expose_device ? "added" : "masked");
	device->offload_to_be_enabled = device->offload_enabled;
	device->offload_enabled = 0;
	return 0;
}
/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h,
	int entry, struct hpsa_scsi_dev_t *new_entry)
{
	int offload_enabled;
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;

	/* Raid offload parameters changed.  Careful about the ordering. */
	if (new_entry->offload_config && new_entry->offload_enabled) {
		/*
		 * if drive is newly offload_enabled, we want to copy the
		 * raid map data first.  If previously offload_enabled and
		 * offload_config were set, raid map data had better be
		 * the same as it was before.  if raid map data is changed
		 * then it had better be the case that
		 * h->dev[entry]->offload_enabled is currently 0.
		 */
		h->dev[entry]->raid_map = new_entry->raid_map;
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
	}
	if (new_entry->hba_ioaccel_enabled) {
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
		wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
	}
	h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
	h->dev[entry]->offload_config = new_entry->offload_config;
	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
	h->dev[entry]->queue_depth = new_entry->queue_depth;

	/*
	 * We can turn off ioaccel offload now, but need to delay turning
	 * it on until we can update h->dev[entry]->phys_disk[], but we
	 * can't do that until all the devices are updated.
	 */
	h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled;
	if (!new_entry->offload_enabled)
		h->dev[entry]->offload_enabled = 0;

	offload_enabled = h->dev[entry]->offload_enabled;
	h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled;
	hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
	h->dev[entry]->offload_enabled = offload_enabled;
}
/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;
	hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
	new_entry->offload_to_be_enabled = new_entry->offload_enabled;
	new_entry->offload_enabled = 0;
}
/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
}
#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])
static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}
static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}
static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* Device attributes that can change, but don't mean
	 * that the device is a different device, nor that the OS
	 * needs to be told anything about the change.
	 */
	if (dev1->raid_level != dev2->raid_level)
		return 1;
	if (dev1->offload_config != dev2->offload_config)
		return 1;
	if (dev1->offload_enabled != dev2->offload_enabled)
		return 1;
	if (!is_logical_dev_addr_mode(dev1->scsi3addr))
		if (dev1->queue_depth != dev2->queue_depth)
			return 1;
	return 0;
}
/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
	if (needle == NULL)
		return DEVICE_NOT_FOUND;

	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
					return DEVICE_UPDATED;
				return DEVICE_SAME;
			} else {
				/* Keep offline devices offline */
				if (needle->volume_offline)
					return DEVICE_NOT_FOUND;
				return DEVICE_CHANGED;
			}
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}
static void hpsa_monitor_offline_device(struct ctlr_info *h,
					unsigned char scsi3addr[])
{
	struct offline_device_entry *device;
	unsigned long flags;

	/* Check to see if device is already on the list */
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_entry(device, &h->offline_device_list, offline_list) {
		if (memcmp(device->scsi3addr, scsi3addr,
			sizeof(device->scsi3addr)) == 0) {
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);

	/* Device is not on the list, add it. */
	device = kmalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
		return;
	}
	memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_add_tail(&device->offline_list, &h->offline_device_list);
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
}
/* Print a message explaining various offline volume states */
static void hpsa_show_volume_status(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd)
{
	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
	switch (sd->volume_offline) {
	case HPSA_LV_OK:
		break;
	case HPSA_LV_UNDERGOING_ERASE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_NOT_AVAILABLE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_NO_KEY:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	}
}
/*
 * Figure the list of physical drive pointers for a logical drive with
 * raid offload configured.
 */
static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
				struct hpsa_scsi_dev_t *dev[], int ndevices,
				struct hpsa_scsi_dev_t *logical_drive)
{
	struct raid_map_data *map = &logical_drive->raid_map;
	struct raid_map_disk_data *dd = &map->data[0];
	int i, j;
	int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
				le16_to_cpu(map->metadata_disks_per_row);
	int nraid_map_entries = le16_to_cpu(map->row_cnt) *
				le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int nphys_disk = le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int qdepth;

	if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
		nraid_map_entries = RAID_MAP_MAX_ENTRIES;

	logical_drive->nphysical_disks = nraid_map_entries;

	qdepth = 0;
	for (i = 0; i < nraid_map_entries; i++) {
		logical_drive->phys_disk[i] = NULL;
		if (!logical_drive->offload_config)
			continue;
		for (j = 0; j < ndevices; j++) {
			if (dev[j] == NULL)
				continue;
			if (dev[j]->devtype != TYPE_DISK)
				continue;
			if (is_logical_device(dev[j]))
				continue;
			if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
				continue;

			logical_drive->phys_disk[i] = dev[j];
			if (i < nphys_disk)
				qdepth = min(h->nr_cmds, qdepth +
				    logical_drive->phys_disk[i]->queue_depth);
			break;
		}

		/*
		 * This can happen if a physical drive is removed and
		 * the logical drive is degraded. In that case, the RAID
		 * map data will refer to a physical disk which isn't actually
		 * present. And in that case offload_enabled should already
		 * be 0, but we'll turn it off here just in case
		 */
		if (!logical_drive->phys_disk[i]) {
			logical_drive->offload_enabled = 0;
			logical_drive->offload_to_be_enabled = 0;
			logical_drive->queue_depth = 8;
		}
	}
	if (nraid_map_entries)
		/*
		 * This is correct for reads, too high for full stripe writes,
		 * way too high for partial stripe writes
		 */
		logical_drive->queue_depth = qdepth;
	else
		logical_drive->queue_depth = h->nr_cmds;
}
static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
				struct hpsa_scsi_dev_t *dev[], int ndevices)
{
	int i;

	for (i = 0; i < ndevices; i++) {
		if (dev[i] == NULL)
			continue;
		if (dev[i]->devtype != TYPE_DISK)
			continue;
		if (!is_logical_device(dev[i]))
			continue;

		/*
		 * If offload is currently enabled, the RAID map and
		 * phys_disk[] assignment *better* not be changing
		 * and since it isn't changing, we do not need to
		 * update it.
		 */
		if (dev[i]->offload_enabled)
			continue;

		hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
	}
}
static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
{
	int rc = 0;

	if (!h->scsi_host)
		return 1;

	if (is_logical_device(device)) /* RAID */
		rc = scsi_add_device(h->scsi_host, device->bus,
					device->target, device->lun);
	else /* HBA */
		rc = hpsa_add_sas_device(h->sas_host, device);

	return rc;
}
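/*
 * Logical (RAID) devices are registered directly with the SCSI midlayer
 * via scsi_add_device(); physical devices are instead exposed through
 * the SAS transport layer (hpsa_add_sas_device() above), so they appear
 * with proper SAS rphys.
 */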
static void hpsa_remove_device(struct ctlr_info *h,
			struct hpsa_scsi_dev_t *device)
{
	struct scsi_device *sdev = NULL;

	if (!h->scsi_host)
		return;

	if (is_logical_device(device)) { /* RAID */
		sdev = scsi_device_lookup(h->scsi_host, device->bus,
						device->target, device->lun);
		if (sdev) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/*
			 * We don't expect to get here.  Future commands
			 * to this device will get a selection timeout as
			 * if the device were gone.
			 */
			hpsa_show_dev_msg(KERN_WARNING, h, device,
					"didn't find device for removal.");
		}
	} else /* HBA */
		hpsa_remove_sas_device(device);
}
static void adjust_hpsa_scsi_table(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data.  This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;

	/*
	 * A reset can cause a device status to change;
	 * re-schedule the scan to see what happened.
	 */
	if (h->reset_in_progress) {
		h->drv_req_rescan = 1;
		return;
	}

	added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
	removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"adjust_hpsa_scsi_table\n");
		goto free_and_out;
	}

	spin_lock_irqsave(&h->devlock, flags);

	/* find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 * If minor device attributes change, just update
	 * the existing device structure.
	 */
	i = 0;
	nremoved = 0;
	nadded = 0;
	while (i < h->ndevices) {
		csd = h->dev[i];
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, i, removed, &nremoved);
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_replace_entry(h, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/* Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
			 */
			sd[entry] = NULL;
		} else if (device_change == DEVICE_UPDATED) {
			hpsa_scsi_update_entry(h, i, sd[entry]);
		}
		i++;
	}

	/* Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found.
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;

		/* Don't add devices which are NOT READY, FORMAT IN PROGRESS
		 * as the SCSI mid-layer does not handle such devices well.
		 * It relentlessly loops sending TUR at 3Hz, then READ(10)
		 * at 160Hz, and prevents the system from coming up.
		 */
		if (sd[i]->volume_offline) {
			hpsa_show_volume_status(h, sd[i]);
			hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
			continue;
		}

		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
					h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0)
				break;
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);

	/* Now that h->dev[]->phys_disk[] is coherent, we can enable
	 * any logical drives that need it enabled.
	 */
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == NULL)
			continue;
		h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
	}

	spin_unlock_irqrestore(&h->devlock, flags);

	/* Monitor devices which are in one of several NOT READY states to be
	 * brought online later. This must be done without holding h->devlock,
	 * so don't touch h->dev[].
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;
		if (sd[i]->volume_offline)
			hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
	}

	/* Don't notify scsi mid layer of any changes the first time through
	 * (or if there are no changes); scsi_scan_host will do it later the
	 * first time through.
	 */
	if (!changes)
		goto free_and_out;

	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		if (removed[i] == NULL)
			continue;
		if (removed[i]->expose_device)
			hpsa_remove_device(h, removed[i]);
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		int rc = 0;

		if (added[i] == NULL)
			continue;
		if (!(added[i]->expose_device))
			continue;
		rc = hpsa_add_device(h, added[i]);
		if (!rc)
			continue;
		dev_warn(&h->pdev->dev,
			"addition failed %d, device not added.", rc);
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
		h->drv_req_rescan = 1;
	}

free_and_out:
	kfree(added);
	kfree(removed);
}

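/*
 * Illustrative sketch (not in the original source): one rescan pass
 * reconciling the two lists.  Given h->dev[] = {A, B, C} and
 * sd[] = {A', C, D}, where A' differs from A only in minor attributes:
 *
 *	A vs A'	-> DEVICE_UPDATED   -> A updated in place
 *	B	-> DEVICE_NOT_FOUND -> moved to removed[], nremoved++
 *	C	-> DEVICE_SAME      -> left untouched
 *	D	-> not in h->dev[]  -> appended via added[], nadded++
 *
 * Only after h->devlock is dropped are the SCSI midlayer add/remove
 * notifications issued from the removed[]/added[] arrays.
 */
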
/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
	int bus, int target, int lun)
{
	int i;
	struct hpsa_scsi_dev_t *sd;

	for (i = 0; i < h->ndevices; i++) {
		sd = h->dev[i];
		if (sd->bus == bus && sd->target == target && sd->lun == lun)
			return sd;
	}
	return NULL;
}

static int hpsa_slave_alloc(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	unsigned long flags;
	struct ctlr_info *h;

	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	if (sdev_channel(sdev) == HPSA_PHYSICAL_DEVICE_BUS) {
		struct scsi_target *starget;
		struct sas_rphy *rphy;

		starget = scsi_target(sdev);
		rphy = target_to_rphy(starget);
		sd = hpsa_find_device_by_sas_rphy(h, rphy);
		if (sd) {
			sd->target = sdev_id(sdev);
			sd->lun = sdev->lun;
		}
	} else
		sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
					sdev_id(sdev), sdev->lun);

	if (sd && sd->expose_device) {
		atomic_set(&sd->ioaccel_cmds_out, 0);
		sdev->hostdata = sd;
	} else
		sdev->hostdata = NULL;
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

/* configure scsi device based on internal per-device structure */
static int hpsa_slave_configure(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	int queue_depth;

	sd = sdev->hostdata;
	sdev->no_uld_attach = !sd || !sd->expose_device;

	if (sd)
		queue_depth = sd->queue_depth != 0 ?
			sd->queue_depth : sdev->host->can_queue;
	else
		queue_depth = sdev->host->can_queue;

	scsi_change_queue_depth(sdev, queue_depth);

	return 0;
}

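/*
 * Worked example (illustrative, not from the original source): a physical
 * drive whose identify data produced sd->queue_depth == 28 is capped at 28
 * outstanding commands, while a device with sd->queue_depth == 0 (no
 * per-device limit known) falls back to the host-wide
 * sdev->host->can_queue value.
 */
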
static void hpsa_slave_destroy(struct scsi_device *sdev)
{
	/* nothing to do. */
}

static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->ioaccel2_cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->ioaccel2_cmd_sg_list[i]);
		h->ioaccel2_cmd_sg_list[i] = NULL;
	}
	kfree(h->ioaccel2_cmd_sg_list);
	h->ioaccel2_cmd_sg_list = NULL;
}

static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->ioaccel2_cmd_sg_list =
		kzalloc(sizeof(*h->ioaccel2_cmd_sg_list) * h->nr_cmds,
					GFP_KERNEL);
	if (!h->ioaccel2_cmd_sg_list)
		return -ENOMEM;
	for (i = 0; i < h->nr_cmds; i++) {
		h->ioaccel2_cmd_sg_list[i] =
			kmalloc(sizeof(*h->ioaccel2_cmd_sg_list[i]) *
					h->maxsgentries, GFP_KERNEL);
		if (!h->ioaccel2_cmd_sg_list[i])
			goto clean;
	}
	return 0;

clean:
	hpsa_free_ioaccel2_sg_chain_blocks(h);
	return -ENOMEM;
}

static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->cmd_sg_list[i]);
		h->cmd_sg_list[i] = NULL;
	}
	kfree(h->cmd_sg_list);
	h->cmd_sg_list = NULL;
}

static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
				GFP_KERNEL);
	if (!h->cmd_sg_list) {
		dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
		return -ENOMEM;
	}
	for (i = 0; i < h->nr_cmds; i++) {
		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
						h->chainsize, GFP_KERNEL);
		if (!h->cmd_sg_list[i]) {
			dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
			goto clean;
		}
	}
	return 0;

clean:
	hpsa_free_sg_chain_blocks(h);
	return -ENOMEM;
}

static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
	struct io_accel2_cmd *cp, struct CommandList *c)
{
	struct ioaccel2_sg_element *chain_block;
	u64 temp64;
	u32 chain_size;

	chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
	chain_size = le32_to_cpu(cp->sg[0].length);
	temp64 = pci_map_single(h->pdev, chain_block, chain_size,
				PCI_DMA_TODEVICE);
	if (dma_mapping_error(&h->pdev->dev, temp64)) {
		/* prevent subsequent unmapping */
		cp->sg->address = 0;
		return -1;
	}
	cp->sg->address = cpu_to_le64(temp64);
	return 0;
}

static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
	struct io_accel2_cmd *cp)
{
	struct ioaccel2_sg_element *chain_sg;
	u64 temp64;
	u32 chain_size;

	chain_sg = cp->sg;
	temp64 = le64_to_cpu(chain_sg->address);
	chain_size = le32_to_cpu(cp->sg[0].length);
	pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE);
}

static int hpsa_map_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg, *chain_block;
	u64 temp64;
	u32 chain_len;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	chain_block = h->cmd_sg_list[c->cmdindex];
	chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
	chain_len = sizeof(*chain_sg) *
		(le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
	chain_sg->Len = cpu_to_le32(chain_len);
	temp64 = pci_map_single(h->pdev, chain_block, chain_len,
				PCI_DMA_TODEVICE);
	if (dma_mapping_error(&h->pdev->dev, temp64)) {
		/* prevent subsequent unmapping */
		chain_sg->Addr = cpu_to_le64(0);
		return -1;
	}
	chain_sg->Addr = cpu_to_le64(temp64);
	return 0;
}

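/*
 * Layout sketch (illustrative, assuming max_cmd_sg_entries == 32): a
 * request needing 40 data SG elements keeps the first 31 inline in
 * c->SG[0..30]; c->SG[31] is rewritten as a chain descriptor
 * (Ext = HPSA_SG_CHAIN) whose Addr/Len point at the external
 * h->cmd_sg_list[c->cmdindex] block holding the remaining descriptors.
 */
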
static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg;

	if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
		return;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
			le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
}

/* Decode the various types of errors on ioaccel2 path.
 * Return 1 for any error that should generate a RAID path retry.
 * Return 0 for errors that don't require a RAID path retry.
 */
static int handle_ioaccel_mode2_error(struct ctlr_info *h,
					struct CommandList *c,
					struct scsi_cmnd *cmd,
					struct io_accel2_cmd *c2)
{
	int data_len;
	int retry = 0;
	u32 ioaccel2_resid = 0;

	switch (c2->error_data.serv_response) {
	case IOACCEL2_SERV_RESPONSE_COMPLETE:
		switch (c2->error_data.status) {
		case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
			cmd->result |= SAM_STAT_CHECK_CONDITION;
			if (c2->error_data.data_present !=
					IOACCEL2_SENSE_DATA_PRESENT) {
				memset(cmd->sense_buffer, 0,
					SCSI_SENSE_BUFFERSIZE);
				break;
			}
			/* copy the sense data */
			data_len = c2->error_data.sense_data_len;
			if (data_len > SCSI_SENSE_BUFFERSIZE)
				data_len = SCSI_SENSE_BUFFERSIZE;
			if (data_len > sizeof(c2->error_data.sense_data_buff))
				data_len =
					sizeof(c2->error_data.sense_data_buff);
			memcpy(cmd->sense_buffer,
				c2->error_data.sense_data_buff, data_len);
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
			retry = 1;
			break;
		default:
			retry = 1;
			break;
		}
		break;
	case IOACCEL2_SERV_RESPONSE_FAILURE:
		switch (c2->error_data.status) {
		case IOACCEL2_STATUS_SR_IO_ERROR:
		case IOACCEL2_STATUS_SR_IO_ABORTED:
		case IOACCEL2_STATUS_SR_OVERRUN:
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_UNDERRUN:
			cmd->result = (DID_OK << 16);		/* host byte */
			cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
			ioaccel2_resid = get_unaligned_le32(
						&c2->error_data.resid_cnt[0]);
			scsi_set_resid(cmd, ioaccel2_resid);
			break;
		case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
		case IOACCEL2_STATUS_SR_INVALID_DEVICE:
		case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
			/* We will get an event from ctlr to trigger rescan */
			retry = 1;
			break;
		default:
			retry = 1;
		}
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
		retry = 1;
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
		break;
	default:
		retry = 1;
		break;
	}

	return retry;	/* retry on raid path? */
}

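/*
 * Decision summary (added for clarity, derived from the switch above):
 * COMPLETE with CHK_COND/BUSY/RES_CON/SET_FULL/ABORTED, and FAILURE with
 * IO_ERROR/IO_ABORTED/OVERRUN or a missing/invalid/disabled path, all set
 * retry = 1 so the command is resubmitted down the normal RAID path.  A
 * clean UNDERRUN just records the residual and completes, and TMF
 * COMPLETE/SUCCESS/WRONG_LUN complete without a retry.
 */
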
static void hpsa_cmd_resolve_events(struct ctlr_info *h,
		struct CommandList *c)
{
	bool do_wake = false;

	/*
	 * Prevent the following race in the abort handler:
	 *
	 * 1. LLD is requested to abort a SCSI command
	 * 2. The SCSI command completes
	 * 3. The struct CommandList associated with step 2 is made available
	 * 4. New I/O request to LLD to another LUN re-uses struct CommandList
	 * 5. Abort handler follows scsi_cmnd->host_scribble and
	 *    finds struct CommandList and tries to abort it
	 * Now we have aborted the wrong command.
	 *
	 * Reset c->scsi_cmd here so that the abort or reset handler will know
	 * this command has completed.  Then, check to see if the handler is
	 * waiting for this command, and, if so, wake it.
	 */
	c->scsi_cmd = SCSI_CMD_IDLE;
	mb();	/* Declare command idle before checking for pending events. */
	if (c->abort_pending) {
		do_wake = true;
		c->abort_pending = false;
	}
	if (c->reset_pending) {
		unsigned long flags;
		struct hpsa_scsi_dev_t *dev;

		/*
		 * There appears to be a reset pending; lock the lock and
		 * reconfirm.  If so, then decrement the count of outstanding
		 * commands and wake the reset command if this is the last one.
		 */
		spin_lock_irqsave(&h->lock, flags);
		dev = c->reset_pending;		/* Re-fetch under the lock. */
		if (dev && atomic_dec_and_test(&dev->reset_cmds_out))
			do_wake = true;
		c->reset_pending = NULL;
		spin_unlock_irqrestore(&h->lock, flags);
	}

	if (do_wake)
		wake_up_all(&h->event_sync_wait_queue);
}

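/*
 * Illustrative timeline of the race this function closes (not from the
 * original source).  Without the SCSI_CMD_IDLE store ordered by mb():
 *
 *	CPU0 (completion)		CPU1 (abort handler)
 *	command X completes		reads cmd->host_scribble -> c
 *	c is freed, re-used for Y	sends abort for c's tag (now Y!)
 *
 * Marking c idle before testing abort_pending/reset_pending lets the
 * handler observe that its target already completed and back off.
 */
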
static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
				      struct CommandList *c)
{
	hpsa_cmd_resolve_events(h, c);
	cmd_tagged_free(h, c);
}

static void hpsa_cmd_free_and_done(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd)
{
	hpsa_cmd_resolve_and_free(h, c);
	cmd->scsi_done(cmd);
}

static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
{
	INIT_WORK(&c->work, hpsa_command_resubmit_worker);
	queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
}

static void hpsa_set_scsi_cmd_aborted(struct scsi_cmnd *cmd)
{
	cmd->result = DID_ABORT << 16;
}

static void hpsa_cmd_abort_and_free(struct ctlr_info *h, struct CommandList *c,
				    struct scsi_cmnd *cmd)
{
	hpsa_set_scsi_cmd_aborted(cmd);
	dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n",
			 c->Request.CDB, c->err_info->ScsiStatus);
	hpsa_cmd_resolve_and_free(h, c);
}

static void process_ioaccel2_completion(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd,
		struct hpsa_scsi_dev_t *dev)
{
	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];

	/* check for good status */
	if (likely(c2->error_data.serv_response == 0 &&
			c2->error_data.status == 0))
		return hpsa_cmd_free_and_done(h, c, cmd);

	/*
	 * Any RAID offload error results in retry which will use
	 * the normal I/O path so the controller can handle whatever's
	 * wrong.
	 */
	if (is_logical_device(dev) &&
		c2->error_data.serv_response ==
			IOACCEL2_SERV_RESPONSE_FAILURE) {
		if (c2->error_data.status ==
			IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
			dev->offload_enabled = 0;

		return hpsa_retry_cmd(h, c);
	}

	if (handle_ioaccel_mode2_error(h, c, cmd, c2))
		return hpsa_retry_cmd(h, c);

	return hpsa_cmd_free_and_done(h, c, cmd);
}

/* Returns 0 on success, < 0 otherwise. */
static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
					struct CommandList *cp)
{
	u8 tmf_status = cp->err_info->ScsiStatus;

	switch (tmf_status) {
	case CISS_TMF_COMPLETE:
		/*
		 * CISS_TMF_COMPLETE never happens, instead,
		 * ei->CommandStatus == 0 for this case.
		 */
	case CISS_TMF_SUCCESS:
		return 0;
	case CISS_TMF_INVALID_FRAME:
	case CISS_TMF_NOT_SUPPORTED:
	case CISS_TMF_FAILED:
	case CISS_TMF_WRONG_LUN:
	case CISS_TMF_OVERLAPPED_TAG:
		break;
	default:
		dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
				tmf_status);
		break;
	}
	return -tmf_status;
}

static void complete_scsi_command(struct CommandList *cp)
{
	struct scsi_cmnd *cmd;
	struct ctlr_info *h;
	struct ErrorInfo *ei;
	struct hpsa_scsi_dev_t *dev;
	struct io_accel2_cmd *c2;

	u8 sense_key;
	u8 asc;      /* additional sense code */
	u8 ascq;     /* additional sense code qualifier */
	unsigned long sense_data_size;

	ei = cp->err_info;
	cmd = cp->scsi_cmd;
	h = cp->h;
	dev = cmd->device->hostdata;
	c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];

	scsi_dma_unmap(cmd); /* undo the DMA mappings */
	if ((cp->cmd_type == CMD_SCSI) &&
		(le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
		hpsa_unmap_sg_chain_block(h, cp);

	if ((cp->cmd_type == CMD_IOACCEL2) &&
		(c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
		hpsa_unmap_ioaccel2_sg_chain_block(h, c2);

	cmd->result = (DID_OK << 16);		/* host byte */
	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */

	if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1)
		atomic_dec(&cp->phys_disk->ioaccel_cmds_out);

	/*
	 * We check for lockup status here as it may be set for
	 * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
	 * fail_all_outstanding_cmds()
	 */
	if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
		/* DID_NO_CONNECT will prevent a retry */
		cmd->result = DID_NO_CONNECT << 16;
		return hpsa_cmd_free_and_done(h, cp, cmd);
	}

	if ((unlikely(hpsa_is_pending_event(cp)))) {
		if (cp->reset_pending)
			return hpsa_cmd_resolve_and_free(h, cp);
		if (cp->abort_pending)
			return hpsa_cmd_abort_and_free(h, cp, cmd);
	}

	if (cp->cmd_type == CMD_IOACCEL2)
		return process_ioaccel2_completion(h, cp, cmd, dev);

	scsi_set_resid(cmd, ei->ResidualCnt);
	if (ei->CommandStatus == 0)
		return hpsa_cmd_free_and_done(h, cp, cmd);

	/* For I/O accelerator commands, copy over some fields to the normal
	 * CISS header used below for error handling.
	 */
	if (cp->cmd_type == CMD_IOACCEL1) {
		struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];

		cp->Header.SGList = scsi_sg_count(cmd);
		cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
		cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
			IOACCEL1_IOFLAGS_CDBLEN_MASK;
		cp->Header.tag = c->tag;
		memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
		memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);

		/* Any RAID offload error results in retry which will use
		 * the normal I/O path so the controller can handle whatever's
		 * wrong.
		 */
		if (is_logical_device(dev)) {
			if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
				dev->offload_enabled = 0;
			return hpsa_retry_cmd(h, cp);
		}
	}

	/* an error has occurred */
	switch (ei->CommandStatus) {

	case CMD_TARGET_STATUS:
		cmd->result |= ei->ScsiStatus;
		/* copy the sense data */
		if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
			sense_data_size = SCSI_SENSE_BUFFERSIZE;
		else
			sense_data_size = sizeof(ei->SenseInfo);
		if (ei->SenseLen < sense_data_size)
			sense_data_size = ei->SenseLen;
		memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
		if (ei->ScsiStatus)
			decode_sense_data(ei->SenseInfo, sense_data_size,
				&sense_key, &asc, &ascq);
		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
			if (sense_key == ABORTED_COMMAND) {
				cmd->result |= DID_SOFT_ERROR << 16;
				break;
			}
			break;
		}
		/* Problem was not a check condition
		 * Pass it up to the upper layers...
		 */
		if (ei->ScsiStatus) {
			dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
				"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
				"Returning result: 0x%x\n",
				cp, ei->ScsiStatus,
				sense_key, asc, ascq,
				cmd->result);
		} else {  /* scsi status is zero??? How??? */
			dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
				"Returning no connection.\n", cp);

			/* Ordinarily, this case should never happen,
			 * but there is a bug in some released firmware
			 * revisions that allows it to happen if, for
			 * example, a 4100 backplane loses power and
			 * the tape drive is in it. We assume that
			 * it's a fatal error of some kind because we
			 * can't show that it wasn't. We will make it
			 * look like selection timeout since that is
			 * the most common reason for this to occur,
			 * and it's severe enough.
			 */
			cmd->result = DID_NO_CONNECT << 16;
		}
		break;

	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(&h->pdev->dev,
			"CDB %16phN data overrun\n", cp->Request.CDB);
		break;
	case CMD_INVALID: {
		/* print_bytes(cp, sizeof(*cp), 1, 0);
		print_cmd(cp); */
		/* We get CMD_INVALID if you address a non-existent device
		 * instead of a selection timeout (no response).  You will
		 * see this if you yank out a drive, then try to access it.
		 * This is kind of a shame because it means that any other
		 * CMD_INVALID (e.g. driver bug) will get interpreted as a
		 * missing target. */
		cmd->result = DID_NO_CONNECT << 16;
	}
		break;
	case CMD_PROTOCOL_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
				cp->Request.CDB);
		break;
	case CMD_HARDWARE_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
			cp->Request.CDB);
		break;
	case CMD_CONNECTION_LOST:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
			cp->Request.CDB);
		break;
	case CMD_ABORTED:
		/* Return now to avoid calling scsi_done(). */
		return hpsa_cmd_abort_and_free(h, cp, cmd);
	case CMD_ABORT_FAILED:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
			cp->Request.CDB);
		break;
	case CMD_UNSOLICITED_ABORT:
		cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
		dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
			cp->Request.CDB);
		break;
	case CMD_TIMEOUT:
		cmd->result = DID_TIME_OUT << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
			cp->Request.CDB);
		break;
	case CMD_UNABORTABLE:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "Command unabortable\n");
		break;
	case CMD_TMF_STATUS:
		if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
			cmd->result = DID_ERROR << 16;
		break;
	case CMD_IOACCEL_DISABLED:
		/* This only handles the direct pass-through case since RAID
		 * offload is handled above.  Just attempt a retry.
		 */
		cmd->result = DID_SOFT_ERROR << 16;
		dev_warn(&h->pdev->dev,
				"cp %p had HP SSD Smart Path error\n", cp);
		break;
	default:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
				cp, ei->CommandStatus);
	}

	return hpsa_cmd_free_and_done(h, cp, cmd);
}

static void hpsa_pci_unmap(struct pci_dev *pdev,
	struct CommandList *c, int sg_used, int data_direction)
{
	int i;

	for (i = 0; i < sg_used; i++)
		pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
				le32_to_cpu(c->SG[i].Len),
				data_direction);
}

static int hpsa_map_one(struct pci_dev *pdev,
		struct CommandList *cp,
		unsigned char *buf,
		size_t buflen,
		int data_direction)
{
	u64 addr64;

	if (buflen == 0 || data_direction == PCI_DMA_NONE) {
		cp->Header.SGList = 0;
		cp->Header.SGTotal = cpu_to_le16(0);
		return 0;
	}

	addr64 = pci_map_single(pdev, buf, buflen, data_direction);
	if (dma_mapping_error(&pdev->dev, addr64)) {
		/* Prevent subsequent unmap of something never mapped */
		cp->Header.SGList = 0;
		cp->Header.SGTotal = cpu_to_le16(0);
		return -1;
	}
	cp->SG[0].Addr = cpu_to_le64(addr64);
	cp->SG[0].Len = cpu_to_le32(buflen);
	cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
	cp->Header.SGList = 1;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
	return 0;
}

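/*
 * Typical caller pattern (sketch, not from the original source):
 *
 *	if (hpsa_map_one(h->pdev, c, buf, bufsize, PCI_DMA_FROMDEVICE))
 *		return -1;	/- mapping failed; SGList/SGTotal stay 0 -/
 *
 * On success exactly one SG descriptor is populated, so the matching
 * teardown is hpsa_pci_unmap(h->pdev, c, 1, data_direction).
 */
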
#define NO_TIMEOUT ((unsigned long) -1)
#define DEFAULT_TIMEOUT 30000 /* milliseconds */
static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
	struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	c->waiting = &wait;
	__enqueue_cmd_and_start_io(h, c, reply_queue);
	if (timeout_msecs == NO_TIMEOUT) {
		/* TODO: get rid of this no-timeout thing */
		wait_for_completion_io(&wait);
		return IO_OK;
	}

	if (!wait_for_completion_io_timeout(&wait,
					msecs_to_jiffies(timeout_msecs))) {
		dev_warn(&h->pdev->dev, "Command timed out.\n");
		return -ETIMEDOUT;
	}
	return IO_OK;
}

static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
				   int reply_queue, unsigned long timeout_msecs)
{
	if (unlikely(lockup_detected(h))) {
		c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
		return IO_OK;
	}
	return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
}

static u32 lockup_detected(struct ctlr_info *h)
{
	int cpu;
	u32 rc, *lockup_detected;

	cpu = get_cpu();
	lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
	rc = *lockup_detected;
	put_cpu();
	return rc;
}

#define MAX_DRIVER_CMD_RETRIES 25
static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
	struct CommandList *c, int data_direction, unsigned long timeout_msecs)
{
	int backoff_time = 10, retry_count = 0;
	int rc;

	do {
		memset(c->err_info, 0, sizeof(*c->err_info));
		rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
						  timeout_msecs);
		if (rc)
			break;
		retry_count++;
		if (retry_count > 3) {
			msleep(backoff_time);
			if (backoff_time < 1000)
				backoff_time *= 2;
		}
	} while ((check_for_unit_attention(h, c) ||
			check_for_busy(h, c)) &&
			retry_count <= MAX_DRIVER_CMD_RETRIES);
	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
	if (retry_count > MAX_DRIVER_CMD_RETRIES)
		rc = -EIO;
	return rc;
}

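/*
 * Backoff schedule sketch (derived from the loop above): the first three
 * retries are issued immediately; from the fourth on, the thread sleeps
 * 10, 20, 40, ... ms, doubling until the delay passes the 1000 ms
 * threshold, for as long as the command keeps reporting unit attention
 * or busy and fewer than MAX_DRIVER_CMD_RETRIES (25) attempts were made.
 */
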
static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
				struct CommandList *c)
{
	const u8 *cdb = c->Request.CDB;
	const u8 *lun = c->Header.LUN.LunAddrBytes;

	dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
	" CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		txt, lun[0], lun[1], lun[2], lun[3],
		lun[4], lun[5], lun[6], lun[7],
		cdb[0], cdb[1], cdb[2], cdb[3],
		cdb[4], cdb[5], cdb[6], cdb[7],
		cdb[8], cdb[9], cdb[10], cdb[11],
		cdb[12], cdb[13], cdb[14], cdb[15]);
}

static void hpsa_scsi_interpret_error(struct ctlr_info *h,
			struct CommandList *cp)
{
	const struct ErrorInfo *ei = cp->err_info;
	struct device *d = &cp->h->pdev->dev;
	u8 sense_key, asc, ascq;
	int sense_len;

	switch (ei->CommandStatus) {
	case CMD_TARGET_STATUS:
		if (ei->SenseLen > sizeof(ei->SenseInfo))
			sense_len = sizeof(ei->SenseInfo);
		else
			sense_len = ei->SenseLen;
		decode_sense_data(ei->SenseInfo, sense_len,
					&sense_key, &asc, &ascq);
		hpsa_print_cmd(h, "SCSI status", cp);
		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
			dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
				sense_key, asc, ascq);
		else
			dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
		if (ei->ScsiStatus == 0)
			dev_warn(d, "SCSI status is abnormally zero.  "
			"(probably indicates selection timeout "
			"reported incorrectly due to a known "
			"firmware bug, circa July, 2001.)\n");
		break;
	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		break;
	case CMD_DATA_OVERRUN:
		hpsa_print_cmd(h, "overrun condition", cp);
		break;
	case CMD_INVALID: {
		/* controller unfortunately reports SCSI passthru's
		 * to non-existent targets as invalid commands.
		 */
		hpsa_print_cmd(h, "invalid command", cp);
		dev_warn(d, "probably means device no longer present\n");
		}
		break;
	case CMD_PROTOCOL_ERR:
		hpsa_print_cmd(h, "protocol error", cp);
		break;
	case CMD_HARDWARE_ERR:
		hpsa_print_cmd(h, "hardware error", cp);
		break;
	case CMD_CONNECTION_LOST:
		hpsa_print_cmd(h, "connection lost", cp);
		break;
	case CMD_ABORTED:
		hpsa_print_cmd(h, "aborted", cp);
		break;
	case CMD_ABORT_FAILED:
		hpsa_print_cmd(h, "abort failed", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		hpsa_print_cmd(h, "unsolicited abort", cp);
		break;
	case CMD_TIMEOUT:
		hpsa_print_cmd(h, "timed out", cp);
		break;
	case CMD_UNABORTABLE:
		hpsa_print_cmd(h, "unabortable", cp);
		break;
	case CMD_CTLR_LOCKUP:
		hpsa_print_cmd(h, "controller lockup detected", cp);
		break;
	default:
		hpsa_print_cmd(h, "unknown status", cp);
		dev_warn(d, "Unknown command status %x\n",
				ei->CommandStatus);
	}
}

static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
			u16 page, unsigned char *buf,
			unsigned char bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
			page, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
					PCI_DMA_FROMDEVICE, NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}

static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
	u8 reset_type, int reply_queue)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	/* fill_cmd can't fail here, no data buffer to map. */
	(void) fill_cmd(c, reset_type, h, NULL, 0, 0,
			scsi3addr, TYPE_MSG);
	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
	if (rc) {
		dev_warn(&h->pdev->dev, "Failed to send reset command\n");
		goto out;
	}
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
	if (ei->CommandStatus != 0) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}

static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
			       struct hpsa_scsi_dev_t *dev,
			       unsigned char *scsi3addr)
{
	int i;
	bool match = false;
	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
	struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;

	if (hpsa_is_cmd_idle(c))
		return false;

	switch (c->cmd_type) {
	case CMD_SCSI:
	case CMD_IOCTL_PEND:
		match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes,
				sizeof(c->Header.LUN.LunAddrBytes));
		break;

	case CMD_IOACCEL1:
	case CMD_IOACCEL2:
		if (c->phys_disk == dev) {
			/* HBA mode match */
			match = true;
		} else {
			/* Possible RAID mode -- check each phys dev. */
			/* FIXME:  Do we need to take out a lock here?  If
			 * so, we could just call hpsa_get_pdisk_of_ioaccel2()
			 * instead. */
			for (i = 0; i < dev->nphysical_disks && !match; i++) {
				/* FIXME: an alternate test might be
				 *
				 * match = dev->phys_disk[i]->ioaccel_handle
				 *              == c2->scsi_nexus;      */
				match = dev->phys_disk[i] == c->phys_disk;
			}
		}
		break;

	case IOACCEL2_TMF:
		for (i = 0; i < dev->nphysical_disks && !match; i++) {
			match = dev->phys_disk[i]->ioaccel_handle ==
					le32_to_cpu(ac->it_nexus);
		}
		break;

	case 0:		/* The command is in the middle of being initialized. */
		match = false;
		break;

	default:
		dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
			c->cmd_type);
		BUG();
	}

	return match;
}

static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
	unsigned char *scsi3addr, u8 reset_type, int reply_queue)
{
	int i;
	int rc = 0;

	/* We can really only handle one reset at a time */
	if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
		dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
		return -EINTR;
	}

	BUG_ON(atomic_read(&dev->reset_cmds_out) != 0);

	for (i = 0; i < h->nr_cmds; i++) {
		struct CommandList *c = h->cmd_pool + i;
		int refcount = atomic_inc_return(&c->refcount);

		if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, scsi3addr)) {
			unsigned long flags;

			/*
			 * Mark the target command as having a reset pending,
			 * then lock a lock so that the command cannot complete
			 * while we're considering it.  If the command is not
			 * idle then count it; otherwise revoke the event.
			 */
			c->reset_pending = dev;
			spin_lock_irqsave(&h->lock, flags);	/* Implied MB */
			if (!hpsa_is_cmd_idle(c))
				atomic_inc(&dev->reset_cmds_out);
			else
				c->reset_pending = NULL;
			spin_unlock_irqrestore(&h->lock, flags);
		}

		cmd_free(h, c);
	}

	rc = hpsa_send_reset(h, scsi3addr, reset_type, reply_queue);
	if (!rc)
		wait_event(h->event_sync_wait_queue,
			atomic_read(&dev->reset_cmds_out) == 0 ||
			lockup_detected(h));

	if (unlikely(lockup_detected(h))) {
		dev_warn(&h->pdev->dev,
			 "Controller lockup detected during reset wait\n");
		rc = -ENODEV;
	}

	if (unlikely(rc))
		atomic_set(&dev->reset_cmds_out, 0);

	mutex_unlock(&h->reset_mutex);
	return rc;
}

static void hpsa_get_raid_level(struct ctlr_info *h,
	unsigned char *scsi3addr, unsigned char *raid_level)
{
	int rc;
	unsigned char *buf;

	*raid_level = RAID_UNKNOWN;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
	if (rc == 0)
		*raid_level = buf[8];
	if (*raid_level > RAID_UNKNOWN)
		*raid_level = RAID_UNKNOWN;
	kfree(buf);
	return;
}

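/*
 * Note (added for clarity; layout inferred from the code above): 0xC1 is
 * in the vendor-specific VPD page range, and byte 8 of the returned
 * buffer carries the volume's RAID level code, which is clamped to
 * RAID_UNKNOWN when out of range rather than trusted blindly.
 */
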
#define HPSA_MAP_DEBUG
#ifdef HPSA_MAP_DEBUG
static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
				struct raid_map_data *map_buff)
{
	struct raid_map_disk_data *dd = &map_buff->data[0];
	int map, row, col;
	u16 map_cnt, row_cnt, disks_per_row;

	if (rc != 0)
		return;

	/* Show details only if debugging has been activated. */
	if (h->raid_offload_debug < 2)
		return;

	dev_info(&h->pdev->dev, "structure_size = %u\n",
				le32_to_cpu(map_buff->structure_size));
	dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
			le32_to_cpu(map_buff->volume_blk_size));
	dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
			le64_to_cpu(map_buff->volume_blk_cnt));
	dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
			map_buff->phys_blk_shift);
	dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
			map_buff->parity_rotation_shift);
	dev_info(&h->pdev->dev, "strip_size = %u\n",
			le16_to_cpu(map_buff->strip_size));
	dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
			le64_to_cpu(map_buff->disk_starting_blk));
	dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
			le64_to_cpu(map_buff->disk_blk_cnt));
	dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
			le16_to_cpu(map_buff->data_disks_per_row));
	dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
			le16_to_cpu(map_buff->metadata_disks_per_row));
	dev_info(&h->pdev->dev, "row_cnt = %u\n",
			le16_to_cpu(map_buff->row_cnt));
	dev_info(&h->pdev->dev, "layout_map_count = %u\n",
			le16_to_cpu(map_buff->layout_map_count));
	dev_info(&h->pdev->dev, "flags = 0x%x\n",
			le16_to_cpu(map_buff->flags));
	dev_info(&h->pdev->dev, "encryption = %s\n",
			le16_to_cpu(map_buff->flags) &
			RAID_MAP_FLAG_ENCRYPT_ON ?  "ON" : "OFF");
	dev_info(&h->pdev->dev, "dekindex = %u\n",
			le16_to_cpu(map_buff->dekindex));
	map_cnt = le16_to_cpu(map_buff->layout_map_count);
	for (map = 0; map < map_cnt; map++) {
		dev_info(&h->pdev->dev, "Map%u:\n", map);
		row_cnt = le16_to_cpu(map_buff->row_cnt);
		for (row = 0; row < row_cnt; row++) {
			dev_info(&h->pdev->dev, "  Row%u:\n", row);
			disks_per_row =
				le16_to_cpu(map_buff->data_disks_per_row);
			for (col = 0; col < disks_per_row; col++, dd++)
				dev_info(&h->pdev->dev,
					"    D%02u: h=0x%04x xor=%u,%u\n",
					col, dd->ioaccel_handle,
					dd->xor_mult[0], dd->xor_mult[1]);
			disks_per_row =
				le16_to_cpu(map_buff->metadata_disks_per_row);
			for (col = 0; col < disks_per_row; col++, dd++)
				dev_info(&h->pdev->dev,
					"    M%02u: h=0x%04x xor=%u,%u\n",
					col, dd->ioaccel_handle,
					dd->xor_mult[0], dd->xor_mult[1]);
		}
	}
}
#else
static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
			__attribute__((unused)) int rc,
			__attribute__((unused)) struct raid_map_data *map_buff)
{
}
#endif

static int hpsa_get_raid_map(struct ctlr_info *h,
	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
	int rc = 0;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
			sizeof(this_device->raid_map), 0,
			scsi3addr, TYPE_CMD)) {
		dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
		cmd_free(h, c);
		return -1;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
					PCI_DMA_FROMDEVICE, NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
		goto out;
	}
	cmd_free(h, c);

	/* @todo in the future, dynamically allocate RAID map memory */
	if (le32_to_cpu(this_device->raid_map.structure_size) >
				sizeof(this_device->raid_map)) {
		dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
		rc = -1;
	}
	hpsa_debug_map_buff(h, rc, &this_device->raid_map);
	return rc;
out:
	cmd_free(h, c);
	return rc;
}

static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
		unsigned char scsi3addr[], u16 bmic_device_index,
		struct bmic_sense_subsystem_info *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	rc = fill_cmd(c, BMIC_SENSE_SUBSYSTEM_INFORMATION, h, buf, bufsize,
		0, RAID_CTLR_LUNID, TYPE_CMD);
	if (rc)
		goto out;

	c->Request.CDB[2] = bmic_device_index & 0xff;
	c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
				PCI_DMA_FROMDEVICE, NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}

static int hpsa_bmic_id_controller(struct ctlr_info *h,
	struct bmic_identify_controller *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	rc = fill_cmd(c, BMIC_IDENTIFY_CONTROLLER, h, buf, bufsize,
		0, RAID_CTLR_LUNID, TYPE_CMD);
	if (rc)
		goto out;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
		PCI_DMA_FROMDEVICE, NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}

static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
	unsigned char scsi3addr[], u16 bmic_device_index,
	struct bmic_identify_physical_device *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);
	rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
		0, RAID_CTLR_LUNID, TYPE_CMD);
	if (rc)
		goto out;

	c->Request.CDB[2] = bmic_device_index & 0xff;
	c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;

	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
						NO_TIMEOUT);
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);

	return rc;
}

/*
 * get enclosure information
 * struct ReportExtendedLUNdata *rlep - Used for BMIC drive number
 * struct hpsa_scsi_dev_t *encl_dev - device entry for enclosure
 * Uses id_physical_device to determine the box_index.
 */
static void hpsa_get_enclosure_info(struct ctlr_info *h,
			unsigned char *scsi3addr,
			struct ReportExtendedLUNdata *rlep, int rle_index,
			struct hpsa_scsi_dev_t *encl_dev)
{
	int rc = -1;
	struct CommandList *c = NULL;
	struct ErrorInfo *ei = NULL;
	struct bmic_sense_storage_box_params *bssbp = NULL;
	struct bmic_identify_physical_device *id_phys = NULL;
	struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
	u16 bmic_device_index = 0;

	bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);

	if (bmic_device_index == 0xFF00)
		goto out;

	bssbp = kzalloc(sizeof(*bssbp), GFP_KERNEL);
	if (!bssbp)
		goto out;

	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
	if (!id_phys)
		goto out;

	rc = hpsa_bmic_id_physical_device(h, scsi3addr, bmic_device_index,
						id_phys, sizeof(*id_phys));
	if (rc) {
		dev_warn(&h->pdev->dev, "%s: id_phys failed %d bdi[0x%x]\n",
			__func__, encl_dev->external, bmic_device_index);
		goto out;
	}

	c = cmd_alloc(h);

	rc = fill_cmd(c, BMIC_SENSE_STORAGE_BOX_PARAMS, h, bssbp,
			sizeof(*bssbp), 0, RAID_CTLR_LUNID, TYPE_CMD);

	if (rc)
		goto out;

	if (id_phys->phys_connector[1] == 'E')
		c->Request.CDB[5] = id_phys->box_index;
	else
		c->Request.CDB[5] = 0;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
						NO_TIMEOUT);
	if (rc)
		goto out;

	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		rc = -1;
		goto out;
	}

	encl_dev->box[id_phys->active_path_number] = bssbp->phys_box_on_port;
	memcpy(&encl_dev->phys_connector[id_phys->active_path_number],
		bssbp->phys_connector, sizeof(bssbp->phys_connector));

	rc = IO_OK;
out:
	kfree(bssbp);
	kfree(id_phys);

	if (c)
		cmd_free(h, c);

	if (rc != IO_OK)
		hpsa_show_dev_msg(KERN_INFO, h, encl_dev,
			"Error, could not get enclosure information\n");
}

static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h,
						unsigned char *scsi3addr)
{
	struct ReportExtendedLUNdata *physdev;
	u32 nphysicals;
	u64 sa = 0;
	int i;

	physdev = kzalloc(sizeof(*physdev), GFP_KERNEL);
	if (!physdev)
		return 0;

	if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
		kfree(physdev);
		return 0;
	}
	nphysicals = get_unaligned_be32(physdev->LUNListLength) / 24;

	for (i = 0; i < nphysicals; i++)
		if (!memcmp(&physdev->LUN[i].lunid[0], scsi3addr, 8)) {
			sa = get_unaligned_be64(&physdev->LUN[i].wwid[0]);
			break;
		}

	kfree(physdev);

	return sa;
}

static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
					struct hpsa_scsi_dev_t *dev)
{
	int rc;
	u64 sa = 0;

	if (is_hba_lunid(scsi3addr)) {
		struct bmic_sense_subsystem_info *ssi;

		ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
		if (ssi == NULL) {
			dev_warn(&h->pdev->dev,
				"%s: out of memory\n", __func__);
			return;
		}

		rc = hpsa_bmic_sense_subsystem_information(h,
					scsi3addr, 0, ssi, sizeof(*ssi));
		if (rc == 0) {
			sa = get_unaligned_be64(ssi->primary_world_wide_id);
			h->sas_address = sa;
		}

		kfree(ssi);
	} else
		sa = hpsa_get_sas_address_from_report_physical(h, scsi3addr);

	dev->sas_address = sa;
}

/* Find out whether the device advertises a given VPD page */
static int hpsa_vpd_page_supported(struct ctlr_info *h,
	unsigned char scsi3addr[], u8 page)
{
	int rc;
	int i;
	int pages;
	unsigned char *buf, bufsize;

	buf = kzalloc(256, GFP_KERNEL);
	if (!buf)
		return 0;

	/* Get the size of the page list first */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
				buf, HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_unsupported;
	pages = buf[3];
	if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
		bufsize = pages + HPSA_VPD_HEADER_SZ;
	else
		bufsize = 255;

	/* Get the whole VPD page list */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
				buf, bufsize);
	if (rc != 0)
		goto exit_unsupported;

	pages = buf[3];
	for (i = 1; i <= pages; i++)
		if (buf[3 + i] == page)
			goto exit_supported;
exit_unsupported:
	kfree(buf);
	return 0;
exit_supported:
	kfree(buf);
	return 1;
}

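/*
 * Supported-pages VPD layout recap (standard SPC, added for reference):
 * byte 3 holds the page-list length and the page codes follow from byte 4
 * onward, which is why the scan above indexes buf[3 + i] for i = 1..pages.
 */
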
static void hpsa_get_ioaccel_status(struct ctlr_info *h,
	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
	int rc;
	unsigned char *buf;
	u8 ioaccel_status;

	this_device->offload_config = 0;
	this_device->offload_enabled = 0;
	this_device->offload_to_be_enabled = 0;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
		goto out;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
			VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
	if (rc != 0)
		goto out;

#define IOACCEL_STATUS_BYTE 4
#define OFFLOAD_CONFIGURED_BIT 0x01
#define OFFLOAD_ENABLED_BIT 0x02
	ioaccel_status = buf[IOACCEL_STATUS_BYTE];
	this_device->offload_config =
		!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
	if (this_device->offload_config) {
		this_device->offload_enabled =
			!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
		if (hpsa_get_raid_map(h, scsi3addr, this_device))
			this_device->offload_enabled = 0;
	}
	this_device->offload_to_be_enabled = this_device->offload_enabled;
out:
	kfree(buf);
	return;
}

/* Get the device id from inquiry page 0x83 */
static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
	unsigned char *device_id, int index, int buflen)
{
	int rc;
	unsigned char *buf;

	if (buflen > 16)
		buflen = 16;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
	if (rc == 0)
		memcpy(device_id, &buf[index], buflen);

	kfree(buf);

	return rc != 0;
}

static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
		void *buf, int bufsize,
		int extended_response)
{
	int rc = IO_OK;
	struct CommandList *c;
	unsigned char scsi3addr[8];
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	/* address the controller */
	memset(scsi3addr, 0, sizeof(scsi3addr));
	if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
		buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	if (extended_response)
		c->Request.CDB[1] = extended_response;
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
					PCI_DMA_FROMDEVICE, NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 &&
	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	} else {
		struct ReportLUNdata *rld = buf;

		if (rld->extended_response_flag != extended_response) {
			dev_err(&h->pdev->dev,
				"report luns requested format %u, got %u\n",
				extended_response,
				rld->extended_response_flag);
			rc = -1;
		}
	}
out:
	cmd_free(h, c);
	return rc;
}

static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
		struct ReportExtendedLUNdata *buf, int bufsize)
{
	return hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
				HPSA_REPORT_PHYS_EXTENDED);
}

static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf, int bufsize)
{
	return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
}

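/*
 * Sizing note (added for clarity): the LUN list length the controller
 * returns is in bytes, so callers divide by the per-entry size -- 24
 * bytes for the extended physical-LUN format requested above, 8 bytes
 * for the plain logical-LUN format -- to get a LUN count.
 */
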
static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

/* Use VPD inquiry to get details of volume status */
static int hpsa_get_volume_status(struct ctlr_info *h,
					unsigned char scsi3addr[])
{
	int rc;
	int status;
	int size;
	unsigned char *buf;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return HPSA_VPD_LV_STATUS_UNSUPPORTED;

	/* Does controller have VPD for logical volume status? */
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
		goto exit_failed;

	/* Get the size of the VPD return buffer */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
					buf, HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_failed;
	size = buf[3];

	/* Now get the whole VPD buffer */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
					buf, size + HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_failed;
	status = buf[4]; /* status byte */

	kfree(buf);
	return status;
exit_failed:
	kfree(buf);
	return HPSA_VPD_LV_STATUS_UNSUPPORTED;
}

/* Determine offline status of a volume.
 * Return either:
 *  0 (not offline)
 *  0xff (offline for unknown reasons)
 *  # (integer code indicating one of several NOT READY states
 *     describing why a volume is to be kept offline)
 */
static int hpsa_volume_offline(struct ctlr_info *h,
					unsigned char scsi3addr[])
{
	struct CommandList *c;
	unsigned char *sense;
	u8 sense_key, asc, ascq;
	int sense_len;
	int rc, ldstat = 0;
	u16 cmd_status;
	u8 scsi_status;
#define ASC_LUN_NOT_READY 0x04
#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02

	c = cmd_alloc(h);

	(void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
	rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
	if (rc) {
		cmd_free(h, c);
		return 0;
	}
	sense = c->err_info->SenseInfo;
	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
		sense_len = sizeof(c->err_info->SenseInfo);
	else
		sense_len = c->err_info->SenseLen;
	decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
	cmd_status = c->err_info->CommandStatus;
	scsi_status = c->err_info->ScsiStatus;
	cmd_free(h, c);

	/* Is the volume 'not ready'? */
	if (cmd_status != CMD_TARGET_STATUS ||
		scsi_status != SAM_STAT_CHECK_CONDITION ||
		sense_key != NOT_READY ||
		asc != ASC_LUN_NOT_READY) {
		return 0;
	}

	/* Determine the reason for not ready state */
	ldstat = hpsa_get_volume_status(h, scsi3addr);

	/* Keep volume offline in certain cases: */
	switch (ldstat) {
	case HPSA_LV_UNDERGOING_ERASE:
	case HPSA_LV_NOT_AVAILABLE:
	case HPSA_LV_UNDERGOING_RPI:
	case HPSA_LV_PENDING_RPI:
	case HPSA_LV_ENCRYPTED_NO_KEY:
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
	case HPSA_LV_UNDERGOING_ENCRYPTION:
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		return ldstat;
	case HPSA_VPD_LV_STATUS_UNSUPPORTED:
		/* If VPD status page isn't available,
		 * use ASC/ASCQ to determine state
		 */
		if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
			(ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
			return ldstat;
		break;
	default:
		break;
	}
	return 0;
}

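/*
 * Flow recap (illustrative, derived from the function above): a TEST UNIT
 * READY that fails with CHECK CONDITION / NOT READY / ASC 0x04 flags the
 * volume as possibly offline; the VPD status page (or, failing that, the
 * ASCQ) then selects the specific NOT READY reason code reported upward.
 */
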
/*
 * Find out if a logical device supports aborts by simply trying one.
 * Smart Array may claim not to support aborts on logical drives, but
 * if an MSA2000 is connected, the drives on that will be presented
 * by the Smart Array as logical drives, and aborts may be sent to
 * those devices successfully.  So the simplest way to find out is
 * to simply try an abort and see how the device responds.
 */
static int hpsa_device_supports_aborts(struct ctlr_info *h,
					unsigned char *scsi3addr)
{
	struct CommandList *c;
	struct ErrorInfo *ei;
	int rc = 0;

	u64 tag = (u64) -1; /* bogus tag */

	/* Assume that physical devices support aborts */
	if (!is_logical_dev_addr_mode(scsi3addr))
		return 1;

	c = cmd_alloc(h);

	(void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG);
	(void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
	/* no unmap needed here because no data xfer. */
	ei = c->err_info;
	switch (ei->CommandStatus) {
	case CMD_INVALID:
		rc = 0;
		break;
	case CMD_UNABORTABLE:
	case CMD_ABORT_FAILED:
		rc = 1;
		break;
	case CMD_TMF_STATUS:
		rc = hpsa_evaluate_tmf_status(h, c);
		break;
	default:
		rc = 0;
		break;
	}
	cmd_free(h, c);
	return rc;
}

static void sanitize_inquiry_string(unsigned char *s, int len)
{
	bool terminated = false;

	for (; len > 0; (--len, ++s)) {
		if (*s == 0)
			terminated = true;
		if (terminated || *s < 0x20 || *s > 0x7e)
			*s = ' ';
	}
}

static int hpsa_update_device_info(struct ctlr_info *h,
	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
	unsigned char *is_OBDR_device)
{

#define OBDR_SIG_OFFSET 43
#define OBDR_TAPE_SIG "$DR-10"
#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)

	unsigned char *inq_buff;
	unsigned char *obdr_sig;

	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
	if (!inq_buff)
		goto bail_out;

	/* Do an inquiry to the device to see what it is. */
	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
		/* Inquiry failed (msg printed already) */
		dev_err(&h->pdev->dev,
			"hpsa_update_device_info: inquiry failed\n");
		goto bail_out;
	}

	sanitize_inquiry_string(&inq_buff[8], 8);
	sanitize_inquiry_string(&inq_buff[16], 16);

	this_device->devtype = (inq_buff[0] & 0x1f);
	memcpy(this_device->scsi3addr, scsi3addr, 8);
	memcpy(this_device->vendor, &inq_buff[8],
		sizeof(this_device->vendor));
	memcpy(this_device->model, &inq_buff[16],
		sizeof(this_device->model));
	memset(this_device->device_id, 0,
		sizeof(this_device->device_id));
	hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
		sizeof(this_device->device_id));

	if (this_device->devtype == TYPE_DISK &&
		is_logical_dev_addr_mode(scsi3addr)) {
		int volume_offline;

		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
		if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
			hpsa_get_ioaccel_status(h, scsi3addr, this_device);
		volume_offline = hpsa_volume_offline(h, scsi3addr);
		if (volume_offline < 0 || volume_offline > 0xff)
			volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
		this_device->volume_offline = volume_offline & 0xff;
	} else {
		this_device->raid_level = RAID_UNKNOWN;
		this_device->offload_config = 0;
		this_device->offload_enabled = 0;
		this_device->offload_to_be_enabled = 0;
		this_device->hba_ioaccel_enabled = 0;
		this_device->volume_offline = 0;
		this_device->queue_depth = h->nr_cmds;
	}

	if (is_OBDR_device) {
		/* See if this is a One-Button-Disaster-Recovery device
		 * by looking for "$DR-10" at offset 43 in inquiry data.
		 */
		obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
		*is_OBDR_device = (this_device->devtype == TYPE_ROM &&
					strncmp(obdr_sig, OBDR_TAPE_SIG,
						OBDR_SIG_LEN) == 0);
	}
	kfree(inq_buff);
	return 0;

bail_out:
	kfree(inq_buff);
	return -ENODEV;
}

static void hpsa_update_device_supports_aborts(struct ctlr_info *h,
		struct hpsa_scsi_dev_t *dev, u8 *scsi3addr)
{
	unsigned long flags;
	int rc, entry;

	/*
	 * See if this device supports aborts.  If we already know
	 * the device, we already know if it supports aborts, otherwise
	 * we have to find out if it supports aborts by trying one.
	 */
	spin_lock_irqsave(&h->devlock, flags);
	rc = hpsa_scsi_find_entry(dev, h->dev, h->ndevices, &entry);
	if ((rc == DEVICE_SAME || rc == DEVICE_UPDATED) &&
		entry >= 0 && entry < h->ndevices) {
		dev->supports_aborts = h->dev[entry]->supports_aborts;
		spin_unlock_irqrestore(&h->devlock, flags);
	} else {
		spin_unlock_irqrestore(&h->devlock, flags);
		dev->supports_aborts =
				hpsa_device_supports_aborts(h, scsi3addr);
		if (dev->supports_aborts < 0)
			dev->supports_aborts = 0;
	}
}

/*
 * Helper function to assign bus, target, lun mapping of devices.
 * Logical drive target and lun are assigned at this time, but
 * physical device lun and target assignment are deferred (assigned
 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
 */
static void figure_bus_target_lun(struct ctlr_info *h,
	u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
{
	u32 lunid = get_unaligned_le32(lunaddrbytes);

	if (!is_logical_dev_addr_mode(lunaddrbytes)) {
		/* physical device, target and lun filled in later */
		if (is_hba_lunid(lunaddrbytes))
			hpsa_set_bus_target_lun(device,
					HPSA_HBA_BUS, 0, lunid & 0x3fff);
		else
			/* defer target, lun assignment for physical devices */
			hpsa_set_bus_target_lun(device,
					HPSA_PHYSICAL_DEVICE_BUS, -1, -1);
		return;
	}
	/* It's a logical device */
	if (device->external) {
		hpsa_set_bus_target_lun(device,
			HPSA_EXTERNAL_RAID_VOLUME_BUS, (lunid >> 16) & 0x3fff,
			lunid & 0x00ff);
		return;
	}
	hpsa_set_bus_target_lun(device, HPSA_RAID_VOLUME_BUS,
				0, lunid & 0x3fff);
}

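/*
 * Address decode sketch (illustrative, based on the masks above): for a
 * local logical volume the low 14 bits of the little-endian lunid become
 * the lun on HPSA_RAID_VOLUME_BUS; for external RAID volumes bits 16-29
 * pick the target and the low byte the lun; plain physical devices get
 * HPSA_PHYSICAL_DEVICE_BUS with target/lun assigned later.
 */
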
/*
 * Get address of physical disk used for an ioaccel2 mode command:
 *	1. Extract ioaccel2 handle from the command.
 *	2. Find a matching ioaccel2 handle from list of physical disks.
 *	3. Return:
 *		1 and set scsi3addr to address of matching physical
 *		0 if no matching physical disk was found.
 */
static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
	struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
{
	struct io_accel2_cmd *c2 =
			&h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
	unsigned long flags;
	int i;

	spin_lock_irqsave(&h->devlock, flags);
	for (i = 0; i < h->ndevices; i++)
		if (h->dev[i]->ioaccel_handle == le32_to_cpu(c2->scsi_nexus)) {
			memcpy(scsi3addr, h->dev[i]->scsi3addr,
				sizeof(h->dev[i]->scsi3addr));
			spin_unlock_irqrestore(&h->devlock, flags);
			return 1;
		}
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

static int figure_external_status(struct ctlr_info *h, int raid_ctlr_position,
	int i, int nphysicals, int nlocal_logicals)
{
	/* In report logicals, local logicals are listed first,
	 * then any externals.
	 */
	int logicals_start = nphysicals + (raid_ctlr_position == 0);

	if (i == raid_ctlr_position)
		return 0;

	if (i < logicals_start)
		return 0;

	/* i is in logicals range, but still within local logicals */
	if ((i - nphysicals - (raid_ctlr_position == 0)) < nlocal_logicals)
		return 0;

	return 1; /* it's an external lun */
}

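/*
 * Index layout example (illustrative): with raid_ctlr_position == 0,
 * nphysicals == 3 and nlocal_logicals == 2, the combined list is
 * [ctlr][phys 1..3][local logical 4..5][external logicals 6..], so
 * logicals_start is 4 and only indices >= 6 report as external here.
 */
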
/*
 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
 * logdev.  The number of luns in physdev and logdev are returned in
 * *nphysicals and *nlogicals, respectively.
 * Returns 0 on success, -1 otherwise.
 */
static int hpsa_gather_lun_info(struct ctlr_info *h,
	struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
	struct ReportLUNdata *logdev, u32 *nlogicals)
{
	if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
		return -1;
	}
	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
			HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
		*nphysicals = HPSA_MAX_PHYS_LUN;
	}
	if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
		dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
		return -1;
	}
	*nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
	/* Reject Logicals in excess of our max capability. */
	if (*nlogicals > HPSA_MAX_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical LUNs (%d) exceeded.  "
			"%d LUNs ignored.\n", HPSA_MAX_LUN,
			*nlogicals - HPSA_MAX_LUN);
		*nlogicals = HPSA_MAX_LUN;
	}
	if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical + physical LUNs (%d) exceeded. "
			"%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
		*nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
	}
	return 0;
}

static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
	int i, int nphysicals, int nlogicals,
	struct ReportExtendedLUNdata *physdev_list,
	struct ReportLUNdata *logdev_list)
{
	/* Helper function, figure out where the LUN ID info is coming from
	 * given index i, lists of physical and logical devices, where in
	 * the list the raid controller is supposed to appear (first or last)
	 */

	int logicals_start = nphysicals + (raid_ctlr_position == 0);
	int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);

	if (i == raid_ctlr_position)
		return RAID_CTLR_LUNID;

	if (i < logicals_start)
		return &physdev_list->LUN[i -
				(raid_ctlr_position == 0)].lunid[0];

	if (i < last_device)
		return &logdev_list->LUN[i - nphysicals -
			(raid_ctlr_position == 0)][0];
	BUG();
	return NULL;
}

/* get physical drive ioaccel handle and queue depth */
static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
		struct hpsa_scsi_dev_t *dev,
		struct ReportExtendedLUNdata *rlep, int rle_index,
		struct bmic_identify_physical_device *id_phys)
{
	int rc;
	struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];

	dev->ioaccel_handle = rle->ioaccel_handle;
	if ((rle->device_flags & 0x08) && dev->ioaccel_handle)
		dev->hba_ioaccel_enabled = 1;
	memset(id_phys, 0, sizeof(*id_phys));
	rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0],
			GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys,
			sizeof(*id_phys));
	if (!rc)
		/* Reserve space for FW operations */
#define DRIVE_CMDS_RESERVED_FOR_FW 2
#define DRIVE_QUEUE_DEPTH 7
		dev->queue_depth =
			le16_to_cpu(id_phys->current_queue_depth_limit) -
				DRIVE_CMDS_RESERVED_FOR_FW;
	else
		dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
}

static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device,
	struct ReportExtendedLUNdata *rlep, int rle_index,
	struct bmic_identify_physical_device *id_phys)
{
	struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];

	if ((rle->device_flags & 0x08) && this_device->ioaccel_handle)
		this_device->hba_ioaccel_enabled = 1;

	memcpy(&this_device->active_path_index,
		&id_phys->active_path_number,
		sizeof(this_device->active_path_index));
	memcpy(&this_device->path_map,
		&id_phys->redundant_path_present_map,
		sizeof(this_device->path_map));
	memcpy(&this_device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(this_device->box));
	memcpy(&this_device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(this_device->phys_connector));
	memcpy(&this_device->bay,
		&id_phys->phys_bay_in_box,
		sizeof(this_device->bay));
}

/* get number of local logical disks. */
static int hpsa_set_local_logical_count(struct ctlr_info *h,
	struct bmic_identify_controller *id_ctlr,
	u32 *nlocals)
{
	int rc;

	if (!id_ctlr) {
		dev_warn(&h->pdev->dev, "%s: id_ctlr buffer is NULL.\n",
			__func__);
		return -ENOMEM;
	}
	memset(id_ctlr, 0, sizeof(*id_ctlr));
	rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr));
	if (!rc)
		if (id_ctlr->configured_logical_drive_count < 256)
			*nlocals = id_ctlr->configured_logical_drive_count;
		else
			*nlocals = le16_to_cpu(
					id_ctlr->extended_logical_unit_count);
	else
		*nlocals = -1;
	return rc;
}
static void hpsa_update_scsi_devices(struct ctlr_info *h)
{
	/* the idea here is we could get notified
	 * that some devices have changed, so we do a report
	 * physical luns and report logical luns cmd, and adjust
	 * our list of devices accordingly.
	 *
	 * The scsi3addr's of devices won't change so long as the
	 * adapter is not reset.  That means we can rescan and
	 * tell which devices we already know about, vs. new
	 * devices, vs. disappearing devices.
	 */
	struct ReportExtendedLUNdata *physdev_list = NULL;
	struct ReportLUNdata *logdev_list = NULL;
	struct bmic_identify_physical_device *id_phys = NULL;
	struct bmic_identify_controller *id_ctlr = NULL;
	u32 nphysicals = 0;
	u32 nlogicals = 0;
	u32 nlocal_logicals = 0;
	u32 ndev_allocated = 0;
	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
	int ncurrent = 0;
	int i, n_ext_target_devs, ndevs_to_allocate;
	int raid_ctlr_position;
	bool physical_device;
	DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);

	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
	physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
	logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
	tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
	id_ctlr = kzalloc(sizeof(*id_ctlr), GFP_KERNEL);

	if (!currentsd || !physdev_list || !logdev_list ||
		!tmpdevice || !id_phys || !id_ctlr) {
		dev_err(&h->pdev->dev, "out of memory\n");
		goto out;
	}
	memset(lunzerobits, 0, sizeof(lunzerobits));

	h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */

	if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
			logdev_list, &nlogicals)) {
		h->drv_req_rescan = 1;
		goto out;
	}

	/* Set number of local logicals (non PTRAID) */
	if (hpsa_set_local_logical_count(h, id_ctlr, &nlocal_logicals)) {
		dev_warn(&h->pdev->dev,
			"%s: Can't determine number of local logical devices.\n",
			__func__);
	}

	/* We might see up to the maximum number of logical and physical disks
	 * plus external target devices, and a device for the local RAID
	 * controller.
	 */
	ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;

	/* Allocate the per device structures */
	for (i = 0; i < ndevs_to_allocate; i++) {
		if (i >= HPSA_MAX_DEVICES) {
			dev_warn(&h->pdev->dev,
				"maximum devices (%d) exceeded. %d devices ignored.\n",
				HPSA_MAX_DEVICES,
				ndevs_to_allocate - HPSA_MAX_DEVICES);
			break;
		}

		currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
		if (!currentsd[i]) {
			dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
				__FILE__, __LINE__);
			h->drv_req_rescan = 1;
			goto out;
		}
		ndev_allocated++;
	}

	if (is_scsi_rev_5(h))
		raid_ctlr_position = 0;
	else
		raid_ctlr_position = nphysicals + nlogicals;

	/* adjust our table of devices */
	n_ext_target_devs = 0;
	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
		u8 *lunaddrbytes, is_OBDR = 0;
		int rc = 0;
		int phys_dev_index = i - (raid_ctlr_position == 0);

		physical_device = i < nphysicals + (raid_ctlr_position == 0);

		/* Figure out where the LUN ID info is coming from */
		lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
			i, nphysicals, nlogicals, physdev_list, logdev_list);

		/* skip masked non-disk devices */
		if (MASKED_DEVICE(lunaddrbytes) && physical_device &&
			(physdev_list->LUN[phys_dev_index].device_type != 0x06) &&
			(physdev_list->LUN[phys_dev_index].device_flags & 0x01))
			continue;

		/* Get device type, vendor, model, device id */
		rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
							&is_OBDR);
		if (rc == -ENOMEM) {
			dev_warn(&h->pdev->dev,
				"Out of memory, rescan deferred.\n");
			h->drv_req_rescan = 1;
			goto out;
		}
		if (rc) {
			dev_warn(&h->pdev->dev,
				"Inquiry failed, skipping device.\n");
			continue;
		}

		/* Determine if this is a lun from an external target array */
		tmpdevice->external =
			figure_external_status(h, raid_ctlr_position, i,
						nphysicals, nlocal_logicals);

		figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
		hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes);
		this_device = currentsd[ncurrent];

		/* Turn on discovery_polling if there are ext target devices.
		 * Event-based change notification is unreliable for those.
		 */
		if (!h->discovery_polling) {
			if (tmpdevice->external) {
				h->discovery_polling = 1;
				dev_info(&h->pdev->dev,
					"External target, activate discovery polling.\n");
			}
		}

		*this_device = *tmpdevice;
		this_device->physical_device = physical_device;

		/*
		 * Expose all devices except for physical devices that
		 * are masked.
		 */
		if (MASKED_DEVICE(lunaddrbytes) && this_device->physical_device)
			this_device->expose_device = 0;
		else
			this_device->expose_device = 1;

		/*
		 * Get the SAS address for physical devices that are exposed.
		 */
		if (this_device->physical_device && this_device->expose_device)
			hpsa_get_sas_address(h, lunaddrbytes, this_device);

		switch (this_device->devtype) {
		case TYPE_ROM:
			/* We don't *really* support actual CD-ROM devices,
			 * just "One Button Disaster Recovery" tape drive
			 * which temporarily pretends to be a CD-ROM drive.
			 * So we check that the device is really an OBDR tape
			 * device by checking for "$DR-10" in bytes 43-48 of
			 * the inquiry data.
			 */
			if (is_OBDR)
				ncurrent++;
			break;
		case TYPE_DISK:
			if (this_device->physical_device) {
				/* The disk is in HBA mode. */
				/* Never use RAID mapper in HBA mode. */
				this_device->offload_enabled = 0;
				hpsa_get_ioaccel_drive_info(h, this_device,
					physdev_list, phys_dev_index, id_phys);
				hpsa_get_path_info(this_device,
					physdev_list, phys_dev_index, id_phys);
			}
			ncurrent++;
			break;
		case TYPE_TAPE:
		case TYPE_MEDIUM_CHANGER:
			ncurrent++;
			break;
		case TYPE_ENCLOSURE:
			hpsa_get_enclosure_info(h, lunaddrbytes,
						physdev_list, phys_dev_index,
						this_device);
			ncurrent++;
			break;
		case TYPE_RAID:
			/* Only present the Smartarray HBA as a RAID controller.
			 * If it's a RAID controller other than the HBA itself
			 * (an external RAID controller, MSA500 or similar)
			 * don't present it.
			 */
			if (!is_hba_lunid(lunaddrbytes))
				break;
			ncurrent++;
			break;
		default:
			break;
		}
		if (ncurrent >= HPSA_MAX_DEVICES)
			break;
	}

	if (h->sas_host == NULL) {
		int rc = 0;

		rc = hpsa_add_sas_host(h);
		if (rc) {
			dev_warn(&h->pdev->dev,
				"Could not add sas host %d\n", rc);
			goto out;
		}
	}

	adjust_hpsa_scsi_table(h, currentsd, ncurrent);
out:
	kfree(tmpdevice);
	for (i = 0; i < ndev_allocated; i++)
		kfree(currentsd[i]);
	kfree(currentsd);
	kfree(physdev_list);
	kfree(logdev_list);
	kfree(id_ctlr);
	kfree(id_phys);
}
static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
				struct scatterlist *sg)
{
	u64 addr64 = (u64) sg_dma_address(sg);
	unsigned int len = sg_dma_len(sg);

	desc->Addr = cpu_to_le64(addr64);
	desc->Len = cpu_to_le32(len);
	desc->Ext = 0;
}
/*
 * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
 * dma mapping and fills in the scatter gather entries of the
 * hpsa command, cp.
 */
static int hpsa_scatter_gather(struct ctlr_info *h,
		struct CommandList *cp,
		struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int use_sg, i, sg_limit, chained, last_sg;
	struct SGDescriptor *curr_sg;

	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0)
		return use_sg;

	if (!use_sg)
		goto sglist_finished;

	/*
	 * If the number of entries is greater than the max for a single list,
	 * then we have a chained list; we will set up all but one entry in the
	 * first list (the last entry is saved for link information);
	 * otherwise, we don't have a chained list and we'll set up at each of
	 * the entries in the one list.
	 */
	curr_sg = cp->SG;
	chained = use_sg > h->max_cmd_sg_entries;
	sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
	last_sg = scsi_sg_count(cmd) - 1;
	scsi_for_each_sg(cmd, sg, sg_limit, i) {
		hpsa_set_sg_descriptor(curr_sg, sg);
		curr_sg++;
	}

	if (chained) {
		/*
		 * Continue with the chained list.  Set curr_sg to the chained
		 * list.  Modify the limit to the total count less the entries
		 * we've already set up.  Resume the scan at the list entry
		 * where the previous loop left off.
		 */
		curr_sg = h->cmd_sg_list[cp->cmdindex];
		sg_limit = use_sg - sg_limit;
		for_each_sg(sg, sg, sg_limit, i) {
			hpsa_set_sg_descriptor(curr_sg, sg);
			curr_sg++;
		}
	}

	/* Back the pointer up to the last entry and mark it as "last". */
	(curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);

	if (use_sg + chained > h->maxSG)
		h->maxSG = use_sg + chained;

	if (chained) {
		cp->Header.SGList = h->max_cmd_sg_entries;
		cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
		if (hpsa_map_sg_chain_block(h, cp)) {
			scsi_dma_unmap(cmd);
			return -1;
		}
		return 0;
	}

sglist_finished:

	cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
	return 0;
}
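/*
 * Chaining example (hypothetical sizes): with h->max_cmd_sg_entries == 32
 * and use_sg == 40, the first 31 descriptors land in the embedded list,
 * the last embedded slot becomes the chain link, and the remaining
 * 40 - 31 == 9 descriptors go to h->cmd_sg_list[cp->cmdindex].
 * SGList is then reported as 32 and SGTotal as 41 (use_sg plus one for
 * the chain descriptor).
 */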
#define IO_ACCEL_INELIGIBLE (1)
static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
{
	int is_write = 0;
	u32 block;
	u32 block_cnt;

	/* Perform some CDB fixups if needed using 10 byte reads/writes only */
	switch (cdb[0]) {
	case WRITE_6:
	case WRITE_12:
		is_write = 1;
	case READ_6:
	case READ_12:
		if (*cdb_len == 6) {
			block = get_unaligned_be16(&cdb[2]);
			block_cnt = cdb[4];
			if (block_cnt == 0)
				block_cnt = 256;
		} else {
			BUG_ON(*cdb_len != 12);
			block = get_unaligned_be32(&cdb[2]);
			block_cnt = get_unaligned_be32(&cdb[6]);
		}
		if (block_cnt > 0xffff)
			return IO_ACCEL_INELIGIBLE;

		cdb[0] = is_write ? WRITE_10 : READ_10;
		cdb[1] = 0;
		cdb[2] = (u8) (block >> 24);
		cdb[3] = (u8) (block >> 16);
		cdb[4] = (u8) (block >> 8);
		cdb[5] = (u8) (block);
		cdb[6] = 0;
		cdb[7] = (u8) (block_cnt >> 8);
		cdb[8] = (u8) (block_cnt);
		cdb[9] = 0;
		*cdb_len = 10;
		break;
	}
	return 0;
}
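/*
 * Example rewrite (illustrative bytes): a READ_6 of 8 blocks whose
 * bytes 2-3 carry 0x1234 arrives as 08 00 12 34 08 00 and leaves as
 * the READ_10 28 00 00 00 12 34 00 00 08 00, with *cdb_len updated
 * from 6 to 10.  A 12-byte CDB whose block count exceeds 0xffff cannot
 * be expressed in a 10-byte CDB and is bounced back as
 * IO_ACCEL_INELIGIBLE.
 */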
static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
	unsigned int len;
	unsigned int total_len = 0;
	struct scatterlist *sg;
	u64 addr64;
	int use_sg, i;
	struct SGDescriptor *curr_sg;
	u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;

	/* TODO: implement chaining support */
	if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);

	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	c->cmd_type = CMD_IOACCEL1;

	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
				(c->cmdindex * sizeof(*cp));
	BUG_ON(c->busaddr & 0x0000007F);

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return use_sg;
	}

	if (use_sg) {
		curr_sg = cp->SG;
		scsi_for_each_sg(cmd, sg, use_sg, i) {
			addr64 = (u64) sg_dma_address(sg);
			len = sg_dma_len(sg);
			total_len += len;
			curr_sg->Addr = cpu_to_le64(addr64);
			curr_sg->Len = cpu_to_le32(len);
			curr_sg->Ext = cpu_to_le32(0);
			curr_sg++;
		}
		(--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);

		switch (cmd->sc_data_direction) {
		case DMA_TO_DEVICE:
			control |= IOACCEL1_CONTROL_DATA_OUT;
			break;
		case DMA_FROM_DEVICE:
			control |= IOACCEL1_CONTROL_DATA_IN;
			break;
		case DMA_NONE:
			control |= IOACCEL1_CONTROL_NODATAXFER;
			break;
		default:
			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
				cmd->sc_data_direction);
			BUG();
			break;
		}
	} else {
		control |= IOACCEL1_CONTROL_NODATAXFER;
	}

	c->Header.SGList = use_sg;
	/* Fill out the command structure to submit */
	cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
	cp->transfer_len = cpu_to_le32(total_len);
	cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
			(cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
	cp->control = cpu_to_le32(control);
	memcpy(cp->CDB, cdb, cdb_len);
	memcpy(cp->CISS_LUN, scsi3addr, 8);
	/* Tag was already set at init time. */
	enqueue_cmd_and_start_io(h, c);
	return 0;
}
/*
 * Queue a command directly to a device behind the controller using the
 * I/O accelerator path.
 */
static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
	struct CommandList *c)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;

	c->phys_disk = dev;

	return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
		cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
}
/*
 * Set encryption parameters for the ioaccel2 request
 */
static void set_encrypt_ioaccel2(struct ctlr_info *h,
	struct CommandList *c, struct io_accel2_cmd *cp)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
	struct raid_map_data *map = &dev->raid_map;
	u64 first_block;

	/* Are we doing encryption on this device */
	if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
		return;
	/* Set the data encryption key index. */
	cp->dekindex = map->dekindex;

	/* Set the encryption enable flag, encoded into direction field. */
	cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;

	/* Set encryption tweak values based on logical block address
	 * If block size is 512, tweak value is LBA.
	 * For other block sizes, tweak is (LBA * block size)/ 512)
	 */
	switch (cmd->cmnd[0]) {
	/* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
	case WRITE_6:
	case READ_6:
		first_block = get_unaligned_be16(&cmd->cmnd[2]);
		break;
	case WRITE_10:
	case READ_10:
	/* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
	case WRITE_12:
	case READ_12:
		first_block = get_unaligned_be32(&cmd->cmnd[2]);
		break;
	case WRITE_16:
	case READ_16:
		first_block = get_unaligned_be64(&cmd->cmnd[2]);
		break;
	default:
		dev_err(&h->pdev->dev,
			"ERROR: %s: size (0x%x) not supported for encryption\n",
			__func__, cmd->cmnd[0]);
		BUG();
		break;
	}

	if (le32_to_cpu(map->volume_blk_size) != 512)
		first_block = first_block *
				le32_to_cpu(map->volume_blk_size)/512;

	cp->tweak_lower = cpu_to_le32(first_block);
	cp->tweak_upper = cpu_to_le32(first_block >> 32);
}
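/*
 * Tweak example (illustrative values): for a volume with a 4096-byte
 * block size, an I/O starting at LBA 100 yields a tweak of
 * 100 * 4096 / 512 == 800; for a 512-byte volume the tweak is the LBA
 * itself.  The 64-bit result is split across tweak_lower/tweak_upper.
 */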
static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
	struct ioaccel2_sg_element *curr_sg;
	int use_sg, i;
	struct scatterlist *sg;
	u64 addr64;
	u32 len;
	u32 total_len = 0;

	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);

	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	c->cmd_type = CMD_IOACCEL2;
	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
				(c->cmdindex * sizeof(*cp));
	BUG_ON(c->busaddr & 0x0000007F);

	memset(cp, 0, sizeof(*cp));
	cp->IU_type = IOACCEL2_IU_TYPE;

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return use_sg;
	}

	if (use_sg) {
		curr_sg = cp->sg;
		if (use_sg > h->ioaccel_maxsg) {
			addr64 = le64_to_cpu(
				h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
			curr_sg->address = cpu_to_le64(addr64);
			curr_sg->length = 0;
			curr_sg->reserved[0] = 0;
			curr_sg->reserved[1] = 0;
			curr_sg->reserved[2] = 0;
			curr_sg->chain_indicator = 0x80;

			curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
		}
		scsi_for_each_sg(cmd, sg, use_sg, i) {
			addr64 = (u64) sg_dma_address(sg);
			len = sg_dma_len(sg);
			total_len += len;
			curr_sg->address = cpu_to_le64(addr64);
			curr_sg->length = cpu_to_le32(len);
			curr_sg->reserved[0] = 0;
			curr_sg->reserved[1] = 0;
			curr_sg->reserved[2] = 0;
			curr_sg->chain_indicator = 0;
			curr_sg++;
		}

		switch (cmd->sc_data_direction) {
		case DMA_TO_DEVICE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_DATA_OUT;
			break;
		case DMA_FROM_DEVICE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_DATA_IN;
			break;
		case DMA_NONE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_NO_DATA;
			break;
		default:
			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
				cmd->sc_data_direction);
			BUG();
			break;
		}
	} else {
		cp->direction &= ~IOACCEL2_DIRECTION_MASK;
		cp->direction |= IOACCEL2_DIR_NO_DATA;
	}

	/* Set encryption parameters, if necessary */
	set_encrypt_ioaccel2(h, c, cp);

	cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
	cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
	memcpy(cp->cdb, cdb, sizeof(cp->cdb));

	cp->data_len = cpu_to_le32(total_len);
	cp->err_ptr = cpu_to_le64(c->busaddr +
			offsetof(struct io_accel2_cmd, error_data));
	cp->err_len = cpu_to_le32(sizeof(cp->error_data));

	/* fill in sg elements */
	if (use_sg > h->ioaccel_maxsg) {
		cp->sg_count = 1;
		cp->sg[0].length = cpu_to_le32(use_sg * sizeof(cp->sg[0]));
		if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
			atomic_dec(&phys_disk->ioaccel_cmds_out);
			scsi_dma_unmap(cmd);
			return -1;
		}
	} else
		cp->sg_count = (u8) use_sg;

	enqueue_cmd_and_start_io(h, c);
	return 0;
}
/*
 * Queue a command to the correct I/O accelerator path.
 */
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	/* Try to honor the device's queue depth */
	if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
					phys_disk->queue_depth) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}
	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr,
						phys_disk);
	else
		return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr,
						phys_disk);
}
static void raid_map_helper(struct raid_map_data *map,
		int offload_to_mirror, u32 *map_index, u32 *current_group)
{
	if (offload_to_mirror == 0) {
		/* use physical disk in the first mirrored group. */
		*map_index %= le16_to_cpu(map->data_disks_per_row);
		return;
	}
	do {
		/* determine mirror group that *map_index indicates */
		*current_group = *map_index /
			le16_to_cpu(map->data_disks_per_row);
		if (offload_to_mirror == *current_group)
			continue;
		if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
			/* select map index from next group */
			*map_index += le16_to_cpu(map->data_disks_per_row);
			(*current_group)++;
		} else {
			/* select map index from first group */
			*map_index %= le16_to_cpu(map->data_disks_per_row);
			*current_group = 0;
		}
	} while (offload_to_mirror != *current_group);
}
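/*
 * Rotation example (hypothetical 3-way R1-ADM map): with
 * data_disks_per_row == 4 and layout_map_count == 3, a map_index of 9
 * initially points into mirror group 9 / 4 == 2.  Asking for
 * offload_to_mirror == 0 reduces it to 9 % 4 == 1 (group 0); asking
 * for group 1 first wraps it to 1, then walks it to 1 + 4 == 5.
 */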
/*
 * Attempt to perform offload RAID mapping for a logical volume I/O.
 */
static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
	struct CommandList *c)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
	struct raid_map_data *map = &dev->raid_map;
	struct raid_map_disk_data *dd = &map->data[0];
	int is_write = 0;
	u32 map_index;
	u64 first_block, last_block;
	u32 block_cnt;
	u32 blocks_per_row;
	u64 first_row, last_row;
	u32 first_row_offset, last_row_offset;
	u32 first_column, last_column;
	u64 r0_first_row, r0_last_row;
	u32 r5or6_blocks_per_row;
	u64 r5or6_first_row, r5or6_last_row;
	u32 r5or6_first_row_offset, r5or6_last_row_offset;
	u32 r5or6_first_column, r5or6_last_column;
	u32 total_disks_per_row;
	u32 stripesize;
	u32 first_group, last_group, current_group;
	u32 map_row;
	u32 disk_handle;
	u64 disk_block;
	u32 disk_block_cnt;
	u8 cdb[16];
	int cdb_len;
	u16 strip_size;
#if BITS_PER_LONG == 32
	u64 tmpdiv;
#endif
	int offload_to_mirror;

	/* check for valid opcode, get LBA and block count */
	switch (cmd->cmnd[0]) {
	case WRITE_6:
		is_write = 1;
	case READ_6:
		first_block = get_unaligned_be16(&cmd->cmnd[2]);
		block_cnt = cmd->cmnd[4];
		if (block_cnt == 0)
			block_cnt = 256;
		break;
	case WRITE_10:
		is_write = 1;
	case READ_10:
		first_block =
			(((u64) cmd->cmnd[2]) << 24) |
			(((u64) cmd->cmnd[3]) << 16) |
			(((u64) cmd->cmnd[4]) << 8) |
			cmd->cmnd[5];
		block_cnt =
			(((u32) cmd->cmnd[7]) << 8) |
			cmd->cmnd[8];
		break;
	case WRITE_12:
		is_write = 1;
	case READ_12:
		first_block =
			(((u64) cmd->cmnd[2]) << 24) |
			(((u64) cmd->cmnd[3]) << 16) |
			(((u64) cmd->cmnd[4]) << 8) |
			cmd->cmnd[5];
		block_cnt =
			(((u32) cmd->cmnd[6]) << 24) |
			(((u32) cmd->cmnd[7]) << 16) |
			(((u32) cmd->cmnd[8]) << 8) |
			cmd->cmnd[9];
		break;
	case WRITE_16:
		is_write = 1;
	case READ_16:
		first_block =
			(((u64) cmd->cmnd[2]) << 56) |
			(((u64) cmd->cmnd[3]) << 48) |
			(((u64) cmd->cmnd[4]) << 40) |
			(((u64) cmd->cmnd[5]) << 32) |
			(((u64) cmd->cmnd[6]) << 24) |
			(((u64) cmd->cmnd[7]) << 16) |
			(((u64) cmd->cmnd[8]) << 8) |
			cmd->cmnd[9];
		block_cnt =
			(((u32) cmd->cmnd[10]) << 24) |
			(((u32) cmd->cmnd[11]) << 16) |
			(((u32) cmd->cmnd[12]) << 8) |
			cmd->cmnd[13];
		break;
	default:
		return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
	}
	last_block = first_block + block_cnt - 1;

	/* check for write to non-RAID-0 */
	if (is_write && dev->raid_level != 0)
		return IO_ACCEL_INELIGIBLE;

	/* check for invalid block or wraparound */
	if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
		last_block < first_block)
		return IO_ACCEL_INELIGIBLE;

	/* calculate stripe information for the request */
	blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
				le16_to_cpu(map->strip_size);
	strip_size = le16_to_cpu(map->strip_size);
#if BITS_PER_LONG == 32
	tmpdiv = first_block;
	(void) do_div(tmpdiv, blocks_per_row);
	first_row = tmpdiv;
	tmpdiv = last_block;
	(void) do_div(tmpdiv, blocks_per_row);
	last_row = tmpdiv;
	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
	tmpdiv = first_row_offset;
	(void) do_div(tmpdiv, strip_size);
	first_column = tmpdiv;
	tmpdiv = last_row_offset;
	(void) do_div(tmpdiv, strip_size);
	last_column = tmpdiv;
#else
	first_row = first_block / blocks_per_row;
	last_row = last_block / blocks_per_row;
	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
	first_column = first_row_offset / strip_size;
	last_column = last_row_offset / strip_size;
#endif

	/* if this isn't a single row/column then give to the controller */
	if ((first_row != last_row) || (first_column != last_column))
		return IO_ACCEL_INELIGIBLE;

	/* proceeding with driver mapping */
	total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
				le16_to_cpu(map->metadata_disks_per_row);
	map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
				le16_to_cpu(map->row_cnt);
	map_index = (map_row * total_disks_per_row) + first_column;

	switch (dev->raid_level) {
	case HPSA_RAID_0:
		break; /* nothing special to do */
	case HPSA_RAID_1:
		/* Handles load balance across RAID 1 members.
		 * (2-drive R1 and R10 with even # of drives.)
		 * Appropriate for SSDs, not optimal for HDDs
		 */
		BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
		if (dev->offload_to_mirror)
			map_index += le16_to_cpu(map->data_disks_per_row);
		dev->offload_to_mirror = !dev->offload_to_mirror;
		break;
	case HPSA_RAID_ADM:
		/* Handles N-way mirrors  (R1-ADM)
		 * and R10 with # of drives divisible by 3.)
		 */
		BUG_ON(le16_to_cpu(map->layout_map_count) != 3);

		offload_to_mirror = dev->offload_to_mirror;
		raid_map_helper(map, offload_to_mirror,
				&map_index, &current_group);
		/* set mirror group to use next time */
		offload_to_mirror =
			(offload_to_mirror >=
			le16_to_cpu(map->layout_map_count) - 1)
			? 0 : offload_to_mirror + 1;
		dev->offload_to_mirror = offload_to_mirror;
		/* Avoid direct use of dev->offload_to_mirror within this
		 * function since multiple threads might simultaneously
		 * increment it beyond the range of dev->layout_map_count -1.
		 */
		break;
	case HPSA_RAID_5:
	case HPSA_RAID_6:
		if (le16_to_cpu(map->layout_map_count) <= 1)
			break;

		/* Verify first and last block are in same RAID group */
		r5or6_blocks_per_row =
			le16_to_cpu(map->strip_size) *
			le16_to_cpu(map->data_disks_per_row);
		BUG_ON(r5or6_blocks_per_row == 0);
		stripesize = r5or6_blocks_per_row *
			le16_to_cpu(map->layout_map_count);
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		first_group = do_div(tmpdiv, stripesize);
		tmpdiv = first_group;
		(void) do_div(tmpdiv, r5or6_blocks_per_row);
		first_group = tmpdiv;
		tmpdiv = last_block;
		last_group = do_div(tmpdiv, stripesize);
		tmpdiv = last_group;
		(void) do_div(tmpdiv, r5or6_blocks_per_row);
		last_group = tmpdiv;
#else
		first_group = (first_block % stripesize) / r5or6_blocks_per_row;
		last_group = (last_block % stripesize) / r5or6_blocks_per_row;
#endif
		if (first_group != last_group)
			return IO_ACCEL_INELIGIBLE;

		/* Verify request is in a single row of RAID 5/6 */
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		(void) do_div(tmpdiv, stripesize);
		first_row = r5or6_first_row = r0_first_row = tmpdiv;
		tmpdiv = last_block;
		(void) do_div(tmpdiv, stripesize);
		r5or6_last_row = r0_last_row = tmpdiv;
#else
		first_row = r5or6_first_row = r0_first_row =
						first_block / stripesize;
		r5or6_last_row = r0_last_row = last_block / stripesize;
#endif
		if (r5or6_first_row != r5or6_last_row)
			return IO_ACCEL_INELIGIBLE;

		/* Verify request is in a single column */
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		first_row_offset = do_div(tmpdiv, stripesize);
		tmpdiv = first_row_offset;
		first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
		r5or6_first_row_offset = first_row_offset;
		tmpdiv = last_block;
		r5or6_last_row_offset = do_div(tmpdiv, stripesize);
		tmpdiv = r5or6_last_row_offset;
		r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
		tmpdiv = r5or6_first_row_offset;
		(void) do_div(tmpdiv, map->strip_size);
		first_column = r5or6_first_column = tmpdiv;
		tmpdiv = r5or6_last_row_offset;
		(void) do_div(tmpdiv, map->strip_size);
		r5or6_last_column = tmpdiv;
#else
		first_row_offset = r5or6_first_row_offset =
			(u32)((first_block % stripesize) %
				r5or6_blocks_per_row);

		r5or6_last_row_offset =
			(u32)((last_block % stripesize) %
				r5or6_blocks_per_row);

		first_column = r5or6_first_column =
			r5or6_first_row_offset / le16_to_cpu(map->strip_size);
		r5or6_last_column =
			r5or6_last_row_offset / le16_to_cpu(map->strip_size);
#endif
		if (r5or6_first_column != r5or6_last_column)
			return IO_ACCEL_INELIGIBLE;

		/* Request is eligible */
		map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
			le16_to_cpu(map->row_cnt);

		map_index = (first_group *
			(le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
			(map_row * total_disks_per_row) + first_column;
		break;
	default:
		return IO_ACCEL_INELIGIBLE;
	}

	if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
		return IO_ACCEL_INELIGIBLE;

	c->phys_disk = dev->phys_disk[map_index];

	disk_handle = dd[map_index].ioaccel_handle;
	disk_block = le64_to_cpu(map->disk_starting_blk) +
			first_row * le16_to_cpu(map->strip_size) +
			(first_row_offset - first_column *
			le16_to_cpu(map->strip_size));
	disk_block_cnt = block_cnt;

	/* handle differing logical/physical block sizes */
	if (map->phys_blk_shift) {
		disk_block <<= map->phys_blk_shift;
		disk_block_cnt <<= map->phys_blk_shift;
	}
	BUG_ON(disk_block_cnt > 0xffff);

	/* build the new CDB for the physical disk I/O */
	if (disk_block > 0xffffffff) {
		cdb[0] = is_write ? WRITE_16 : READ_16;
		cdb[1] = 0;
		cdb[2] = (u8) (disk_block >> 56);
		cdb[3] = (u8) (disk_block >> 48);
		cdb[4] = (u8) (disk_block >> 40);
		cdb[5] = (u8) (disk_block >> 32);
		cdb[6] = (u8) (disk_block >> 24);
		cdb[7] = (u8) (disk_block >> 16);
		cdb[8] = (u8) (disk_block >> 8);
		cdb[9] = (u8) (disk_block);
		cdb[10] = (u8) (disk_block_cnt >> 24);
		cdb[11] = (u8) (disk_block_cnt >> 16);
		cdb[12] = (u8) (disk_block_cnt >> 8);
		cdb[13] = (u8) (disk_block_cnt);
		cdb[14] = 0;
		cdb[15] = 0;
		cdb_len = 16;
	} else {
		cdb[0] = is_write ? WRITE_10 : READ_10;
		cdb[1] = 0;
		cdb[2] = (u8) (disk_block >> 24);
		cdb[3] = (u8) (disk_block >> 16);
		cdb[4] = (u8) (disk_block >> 8);
		cdb[5] = (u8) (disk_block);
		cdb[6] = 0;
		cdb[7] = (u8) (disk_block_cnt >> 8);
		cdb[8] = (u8) (disk_block_cnt);
		cdb[9] = 0;
		cdb_len = 10;
	}
	return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
						dev->scsi3addr,
						dev->phys_disk[map_index]);
}
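/*
 * Mapping example (all values hypothetical): with strip_size == 128,
 * data_disks_per_row == 4 (so blocks_per_row == 512) and a RAID 0 read
 * of 8 blocks at LBA 1000: first_row == 1000 / 512 == 1,
 * first_row_offset == 1000 - 512 == 488, first_column == 488 / 128
 * == 3, and the physical request starts at disk_starting_blk +
 * 1 * 128 + (488 - 3 * 128) == disk_starting_blk + 232.  The request
 * is eligible because last_block 1007 falls in the same row and column.
 */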
/*
 * Submit commands down the "normal" RAID stack path
 * All callers to hpsa_ciss_submit must check lockup_detected
 * beforehand, before (opt.) and after calling cmd_alloc
 */
static int hpsa_ciss_submit(struct ctlr_info *h,
	struct CommandList *c, struct scsi_cmnd *cmd,
	unsigned char scsi3addr[])
{
	cmd->host_scribble = (unsigned char *) c;
	c->cmd_type = CMD_SCSI;
	c->scsi_cmd = cmd;
	c->Header.ReplyQueue = 0;  /* unused in simple mode */
	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
	c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));

	/* Fill in the request block... */

	c->Request.Timeout = 0;
	BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
	c->Request.CDBLen = cmd->cmd_len;
	memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
	switch (cmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
		break;
	case DMA_FROM_DEVICE:
		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
		break;
	case DMA_NONE:
		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
		break;
	case DMA_BIDIRECTIONAL:
		/* This can happen if a buggy application does a scsi passthru
		 * and sets both inlen and outlen to non-zero. ( see
		 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
		 */

		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
		/* This is technically wrong, and hpsa controllers should
		 * reject it with CMD_INVALID, which is the most correct
		 * response, but non-fibre backends appear to let it
		 * slide by, and give the same results as if this field
		 * were set correctly.  Either way is acceptable for
		 * our purposes here.
		 */
		break;

	default:
		dev_err(&h->pdev->dev, "unknown data direction: %d\n",
			cmd->sc_data_direction);
		BUG();
		break;
	}

	if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
		hpsa_cmd_resolve_and_free(h, c);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	enqueue_cmd_and_start_io(h, c);
	/* the cmd'll come back via intr handler in complete_scsi_command() */
	return 0;
}
static void hpsa_cmd_init(struct ctlr_info *h, int index,
				struct CommandList *c)
{
	dma_addr_t cmd_dma_handle, err_dma_handle;

	/* Zero out all of commandlist except the last field, refcount */
	memset(c, 0, offsetof(struct CommandList, refcount));
	c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
	cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
	c->err_info = h->errinfo_pool + index;
	memset(c->err_info, 0, sizeof(*c->err_info));
	err_dma_handle = h->errinfo_pool_dhandle
	    + index * sizeof(*c->err_info);
	c->cmdindex = index;
	c->busaddr = (u32) cmd_dma_handle;
	c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
	c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
	c->h = h;
	c->scsi_cmd = SCSI_CMD_IDLE;
}
static void hpsa_preinitialize_commands(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->nr_cmds; i++) {
		struct CommandList *c = h->cmd_pool + i;

		hpsa_cmd_init(h, i, c);
		atomic_set(&c->refcount, 0);
	}
}

static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
				struct CommandList *c)
{
	dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);

	BUG_ON(c->cmdindex != index);

	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
	memset(c->err_info, 0, sizeof(*c->err_info));
	c->busaddr = (u32) cmd_dma_handle;
}
static int hpsa_ioaccel_submit(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd,
		unsigned char *scsi3addr)
{
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
	int rc = IO_ACCEL_INELIGIBLE;

	cmd->host_scribble = (unsigned char *) c;

	if (dev->offload_enabled) {
		hpsa_cmd_init(h, c->cmdindex, c);
		c->cmd_type = CMD_SCSI;
		c->scsi_cmd = cmd;
		rc = hpsa_scsi_ioaccel_raid_map(h, c);
		if (rc < 0)     /* scsi_dma_map failed. */
			rc = SCSI_MLQUEUE_HOST_BUSY;
	} else if (dev->hba_ioaccel_enabled) {
		hpsa_cmd_init(h, c->cmdindex, c);
		c->cmd_type = CMD_SCSI;
		c->scsi_cmd = cmd;
		rc = hpsa_scsi_ioaccel_direct_map(h, c);
		if (rc < 0)     /* scsi_dma_map failed. */
			rc = SCSI_MLQUEUE_HOST_BUSY;
	}
	return rc;
}
static void hpsa_command_resubmit_worker(struct work_struct *work)
{
	struct scsi_cmnd *cmd;
	struct hpsa_scsi_dev_t *dev;
	struct CommandList *c = container_of(work, struct CommandList, work);

	cmd = c->scsi_cmd;
	dev = cmd->device->hostdata;
	if (!dev) {
		cmd->result = DID_NO_CONNECT << 16;
		return hpsa_cmd_free_and_done(c->h, c, cmd);
	}
	if (c->reset_pending)
		return hpsa_cmd_resolve_and_free(c->h, c);
	if (c->abort_pending)
		return hpsa_cmd_abort_and_free(c->h, c, cmd);
	if (c->cmd_type == CMD_IOACCEL2) {
		struct ctlr_info *h = c->h;
		struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
		int rc;

		if (c2->error_data.serv_response ==
				IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
			rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr);
			if (rc == 0)
				return;
			if (rc == SCSI_MLQUEUE_HOST_BUSY) {
				/*
				 * If we get here, it means dma mapping failed.
				 * Try again via scsi mid layer, which will
				 * then get SCSI_MLQUEUE_HOST_BUSY.
				 */
				cmd->result = DID_IMM_RETRY << 16;
				return hpsa_cmd_free_and_done(h, c, cmd);
			}
			/* else, fall thru and resubmit down CISS path */
		}
	}
	hpsa_cmd_partial_init(c->h, c->cmdindex, c);
	if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
		/*
		 * If we get here, it means dma mapping failed. Try
		 * again via scsi mid layer, which will then get
		 * SCSI_MLQUEUE_HOST_BUSY.
		 *
		 * hpsa_ciss_submit will have already freed c
		 * if it encountered a dma mapping failure.
		 */
		cmd->result = DID_IMM_RETRY << 16;
		cmd->scsi_done(cmd);
	}
}
/* Running in struct Scsi_Host->host_lock less mode */
static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
{
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	unsigned char scsi3addr[8];
	struct CommandList *c;
	int rc = 0;

	/* Get the ptr to our adapter structure out of cmd->host. */
	h = sdev_to_hba(cmd->device);

	BUG_ON(cmd->request->tag < 0);

	dev = cmd->device->hostdata;
	if (!dev) {
		cmd->result = DID_NO_CONNECT << 16;
		cmd->scsi_done(cmd);
		return 0;
	}

	memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));

	if (unlikely(lockup_detected(h))) {
		cmd->result = DID_NO_CONNECT << 16;
		cmd->scsi_done(cmd);
		return 0;
	}
	c = cmd_tagged_alloc(h, cmd);

	/*
	 * Call alternate submit routine for I/O accelerated commands.
	 * Retries always go down the normal I/O path.
	 */
	if (likely(cmd->retries == 0 &&
		cmd->request->cmd_type == REQ_TYPE_FS &&
		h->acciopath_status)) {
		rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr);
		if (rc == 0)
			return 0;
		if (rc == SCSI_MLQUEUE_HOST_BUSY) {
			hpsa_cmd_resolve_and_free(h, c);
			return SCSI_MLQUEUE_HOST_BUSY;
		}
	}
	return hpsa_ciss_submit(h, c, cmd, scsi3addr);
}
static void hpsa_scan_complete(struct ctlr_info *h)
{
	unsigned long flags;

	spin_lock_irqsave(&h->scan_lock, flags);
	h->scan_finished = 1;
	wake_up_all(&h->scan_wait_queue);
	spin_unlock_irqrestore(&h->scan_lock, flags);
}

static void hpsa_scan_start(struct Scsi_Host *sh)
{
	struct ctlr_info *h = shost_to_hba(sh);
	unsigned long flags;

	/*
	 * Don't let rescans be initiated on a controller known to be locked
	 * up.  If the controller locks up *during* a rescan, that thread is
	 * probably hosed, but at least we can prevent new rescan threads from
	 * piling up on a locked up controller.
	 */
	if (unlikely(lockup_detected(h)))
		return hpsa_scan_complete(h);

	/* wait until any scan already in progress is finished. */
	while (1) {
		spin_lock_irqsave(&h->scan_lock, flags);
		if (h->scan_finished)
			break;
		spin_unlock_irqrestore(&h->scan_lock, flags);
		wait_event(h->scan_wait_queue, h->scan_finished);
		/* Note: We don't need to worry about a race between this
		 * thread and driver unload because the midlayer will
		 * have incremented the reference count, so unload won't
		 * happen if we're in here.
		 */
	}
	h->scan_finished = 0; /* mark scan as in progress */
	spin_unlock_irqrestore(&h->scan_lock, flags);

	if (unlikely(lockup_detected(h)))
		return hpsa_scan_complete(h);

	hpsa_update_scsi_devices(h);

	hpsa_scan_complete(h);
}
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;

	if (!logical_drive)
		return -ENODEV;

	if (qdepth < 1)
		qdepth = 1;
	else if (qdepth > logical_drive->queue_depth)
		qdepth = logical_drive->queue_depth;

	return scsi_change_queue_depth(sdev, qdepth);
}

static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time)
{
	struct ctlr_info *h = shost_to_hba(sh);
	unsigned long flags;
	int finished;

	spin_lock_irqsave(&h->scan_lock, flags);
	finished = h->scan_finished;
	spin_unlock_irqrestore(&h->scan_lock, flags);
	return finished;
}
static int hpsa_scsi_host_alloc(struct ctlr_info *h)
{
	struct Scsi_Host *sh;

	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
	if (sh == NULL) {
		dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
		return -ENOMEM;
	}

	sh->io_port = 0;
	sh->n_io_port = 0;
	sh->this_id = -1;
	sh->max_channel = 3;
	sh->max_cmd_len = MAX_COMMAND_SIZE;
	sh->max_lun = HPSA_MAX_LUN;
	sh->max_id = HPSA_MAX_LUN;
	sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
	sh->cmd_per_lun = sh->can_queue;
	sh->sg_tablesize = h->maxsgentries;
	sh->transportt = hpsa_sas_transport_template;
	sh->hostdata[0] = (unsigned long) h;
	sh->irq = h->intr[h->intr_mode];
	sh->unique_id = sh->irq;

	h->scsi_host = sh;
	return 0;
}

static int hpsa_scsi_add_host(struct ctlr_info *h)
{
	int rv;

	rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
	if (rv) {
		dev_err(&h->pdev->dev, "scsi_add_host failed\n");
		return rv;
	}
	scsi_scan_host(h->scsi_host);
	return 0;
}
/*
 * The block layer has already gone to the trouble of picking out a unique,
 * small-integer tag for this request.  We use an offset from that value as
 * an index to select our command block.  (The offset allows us to reserve the
 * low-numbered entries for our own uses.)
 */
static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
{
	int idx = scmd->request->tag;

	if (idx < 0)
		return idx;

	/* Offset to leave space for internal cmds. */
	return idx += HPSA_NRESERVED_CMDS;
}
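/*
 * Example (assuming, say, HPSA_NRESERVED_CMDS == 16): block tag 0 maps
 * to command block 16, tag 1 to block 17, and so on; blocks 0..15 stay
 * reserved for the driver's internal commands.
 */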
/*
 * Send a TEST_UNIT_READY command to the specified LUN using the specified
 * reply queue; returns zero if the unit is ready, and non-zero otherwise.
 */
static int hpsa_send_test_unit_ready(struct ctlr_info *h,
				struct CommandList *c, unsigned char lunaddr[],
				int reply_queue)
{
	int rc;

	/* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
	(void) fill_cmd(c, TEST_UNIT_READY, h,
			NULL, 0, 0, lunaddr, TYPE_CMD);
	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
	if (rc)
		return rc;
	/* no unmap needed here because no data xfer. */

	/* Check if the unit is already ready. */
	if (c->err_info->CommandStatus == CMD_SUCCESS)
		return 0;

	/*
	 * The first command sent after reset will receive "unit attention" to
	 * indicate that the LUN has been reset...this is actually what we're
	 * looking for (but, success is good too).
	 */
	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
		c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
			(c->err_info->SenseInfo[2] == NO_SENSE ||
			 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
		return 0;

	return 1;
}

/*
 * Wait for a TEST_UNIT_READY command to complete, retrying as necessary;
 * returns zero when the unit is ready, and non-zero when giving up.
 */
static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
				struct CommandList *c,
				unsigned char lunaddr[], int reply_queue)
{
	int rc;
	int count = 0;
	int waittime = 1; /* seconds */

	/* Send test unit ready until device ready, or give up. */
	for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {

		/*
		 * Wait for a bit.  do this first, because if we send
		 * the TUR right away, the reset will just abort it.
		 */
		msleep(1000 * waittime);

		rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
		if (!rc)
			break;

		/* Increase wait time with each try, up to a point. */
		if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
			waittime *= 2;

		dev_warn(&h->pdev->dev,
			 "waiting %d secs for device to become ready.\n",
			 waittime);
	}

	return rc;
}
static int wait_for_device_to_become_ready(struct ctlr_info *h,
					   unsigned char lunaddr[],
					   int reply_queue)
{
	int rc = 1;
	int first_queue;
	int last_queue;
	int rq;
	struct CommandList *c;

	c = cmd_alloc(h);

	/*
	 * If no specific reply queue was requested, then send the TUR
	 * repeatedly, requesting a reply on each reply queue; otherwise execute
	 * the loop exactly once using only the specified queue.
	 */
	if (reply_queue == DEFAULT_REPLY_QUEUE) {
		first_queue = 0;
		last_queue = h->nreply_queues - 1;
	} else {
		first_queue = reply_queue;
		last_queue = reply_queue;
	}

	for (rq = first_queue; rq <= last_queue; rq++) {
		rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
		if (rc)
			break;
	}

	if (rc)
		dev_warn(&h->pdev->dev, "giving up on device.\n");
	else
		dev_warn(&h->pdev->dev, "device is ready.\n");

	cmd_free(h, c);
	return rc;
}
/* Need at least one of these error handlers to keep ../scsi/hosts.c from
 * complaining.  Doing a host- or bus-reset can't do anything good here.
 */
static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
{
	int rc;
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	u8 reset_type;
	char msg[48];

	/* find the controller to which the command to be aborted was sent */
	h = sdev_to_hba(scsicmd->device);
	if (h == NULL) /* paranoia */
		return FAILED;

	if (lockup_detected(h))
		return FAILED;

	dev = scsicmd->device->hostdata;
	if (!dev) {
		dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__);
		return FAILED;
	}

	/* if controller locked up, we can guarantee command won't complete */
	if (lockup_detected(h)) {
		snprintf(msg, sizeof(msg),
			 "cmd %d RESET FAILED, lockup detected",
			 hpsa_get_cmd_index(scsicmd));
		hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
		return FAILED;
	}

	/* this reset request might be the result of a lockup; check */
	if (detect_controller_lockup(h)) {
		snprintf(msg, sizeof(msg),
			 "cmd %d RESET FAILED, new lockup detected",
			 hpsa_get_cmd_index(scsicmd));
		hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
		return FAILED;
	}

	/* Do not attempt on controller */
	if (is_hba_lunid(dev->scsi3addr))
		return SUCCESS;

	if (is_logical_dev_addr_mode(dev->scsi3addr))
		reset_type = HPSA_DEVICE_RESET_MSG;
	else
		reset_type = HPSA_PHYS_TARGET_RESET;

	sprintf(msg, "resetting %s",
		reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ");
	hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);

	h->reset_in_progress = 1;

	/* send a reset to the SCSI LUN which the command was sent to */
	rc = hpsa_do_reset(h, dev, dev->scsi3addr, reset_type,
			   DEFAULT_REPLY_QUEUE);
	sprintf(msg, "reset %s %s",
		reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ",
		rc == 0 ? "completed successfully" : "failed");
	hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
	h->reset_in_progress = 0;
	return rc == 0 ? SUCCESS : FAILED;
}
static void swizzle_abort_tag(u8 *tag)
{
	u8 original_tag[8];

	memcpy(original_tag, tag, 8);
	tag[0] = original_tag[3];
	tag[1] = original_tag[2];
	tag[2] = original_tag[1];
	tag[3] = original_tag[0];
	tag[4] = original_tag[7];
	tag[5] = original_tag[6];
	tag[6] = original_tag[5];
	tag[7] = original_tag[4];
}
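/*
 * Swizzle example: the 8-byte tag 00 11 22 33 44 55 66 77 becomes
 * 33 22 11 00 77 66 55 44; each 4-byte half is byte-reversed for
 * controllers that need the abort tag bytes swapped.
 */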
static void hpsa_get_tag(struct ctlr_info *h,
	struct CommandList *c, __le32 *taglower, __le32 *tagupper)
{
	u64 tag;

	if (c->cmd_type == CMD_IOACCEL1) {
		struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
			&h->ioaccel_cmd_pool[c->cmdindex];
		tag = le64_to_cpu(cm1->tag);
		*tagupper = cpu_to_le32(tag >> 32);
		*taglower = cpu_to_le32(tag);
		return;
	}
	if (c->cmd_type == CMD_IOACCEL2) {
		struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
			&h->ioaccel2_cmd_pool[c->cmdindex];
		/* upper tag not used in ioaccel2 mode */
		memset(tagupper, 0, sizeof(*tagupper));
		*taglower = cm2->Tag;
		return;
	}
	tag = le64_to_cpu(c->Header.tag);
	*tagupper = cpu_to_le32(tag >> 32);
	*taglower = cpu_to_le32(tag);
}
static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
	struct CommandList *abort, int reply_queue)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;
	__le32 tagupper, taglower;

	c = cmd_alloc(h);

	/* fill_cmd can't fail here, no buffer to map */
	(void) fill_cmd(c, HPSA_ABORT_MSG, h, &abort->Header.tag,
		0, 0, scsi3addr, TYPE_MSG);
	if (h->needs_abort_tags_swizzled)
		swizzle_abort_tag(&c->Request.CDB[4]);
	(void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
	hpsa_get_tag(h, abort, &taglower, &tagupper);
	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n",
		__func__, tagupper, taglower);
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
	switch (ei->CommandStatus) {
	case CMD_SUCCESS:
		break;
	case CMD_TMF_STATUS:
		rc = hpsa_evaluate_tmf_status(h, c);
		break;
	case CMD_UNABORTABLE: /* Very common, don't make noise. */
		rc = -1;
		break;
	default:
		dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
			__func__, tagupper, taglower);
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
		break;
	}
	cmd_free(h, c);
	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
		__func__, tagupper, taglower);
	return rc;
}
static void setup_ioaccel2_abort_cmd(struct CommandList *c, struct ctlr_info *h,
	struct CommandList *command_to_abort, int reply_queue)
{
	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
	struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
	struct io_accel2_cmd *c2a =
		&h->ioaccel2_cmd_pool[command_to_abort->cmdindex];
	struct scsi_cmnd *scmd = command_to_abort->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = scmd->device->hostdata;

	/*
	 * We're overlaying struct hpsa_tmf_struct on top of something which
	 * was allocated as a struct io_accel2_cmd, so we better be sure it
	 * actually fits, and doesn't overrun the error info space.
	 */
	BUILD_BUG_ON(sizeof(struct hpsa_tmf_struct) >
			sizeof(struct io_accel2_cmd));
	BUG_ON(offsetof(struct io_accel2_cmd, error_data) <
			offsetof(struct hpsa_tmf_struct, error_len) +
				sizeof(ac->error_len));

	c->cmd_type = IOACCEL2_TMF;
	c->scsi_cmd = SCSI_CMD_BUSY;

	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
				(c->cmdindex * sizeof(struct io_accel2_cmd));
	BUG_ON(c->busaddr & 0x0000007F);

	memset(ac, 0, sizeof(*c2)); /* yes this is correct */
	ac->iu_type = IOACCEL2_IU_TMF_TYPE;
	ac->reply_queue = reply_queue;
	ac->tmf = IOACCEL2_TMF_ABORT;
	ac->it_nexus = cpu_to_le32(dev->ioaccel_handle);
	memset(ac->lun_id, 0, sizeof(ac->lun_id));
	ac->tag = cpu_to_le64(c->cmdindex << DIRECT_LOOKUP_SHIFT);
	ac->abort_tag = cpu_to_le64(le32_to_cpu(c2a->Tag));
	ac->error_ptr = cpu_to_le64(c->busaddr +
			offsetof(struct io_accel2_cmd, error_data));
	ac->error_len = cpu_to_le32(sizeof(c2->error_data));
}
/* ioaccel2 path firmware cannot handle abort task requests.
 * Change abort requests to physical target reset, and send to the
 * address of the physical disk used for the ioaccel 2 command.
 * Return 0 on success (IO_OK)
 * -1 on failure
 */

static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
	unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
{
	int rc = IO_OK;
	struct scsi_cmnd *scmd; /* scsi command within request being aborted */
	struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
	unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
	unsigned char *psa = &phys_scsi3addr[0];

	/* Get a pointer to the hpsa logical device. */
	scmd = abort->scsi_cmd;
	dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
	if (dev == NULL) {
		dev_warn(&h->pdev->dev,
			"Cannot abort: no device pointer for command.\n");
		return -1; /* not abortable */
	}

	if (h->raid_offload_debug > 0)
		dev_info(&h->pdev->dev,
			"scsi %d:%d:%d:%d %s scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
			"Reset as abort",
			scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
			scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);

	if (!dev->offload_enabled) {
		dev_warn(&h->pdev->dev,
			"Can't abort: device is not operating in HP SSD Smart Path mode.\n");
		return -1; /* not abortable */
	}

	/* Incoming scsi3addr is logical addr. We need physical disk addr. */
	if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
		dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
		return -1; /* not abortable */
	}

	/* send the reset */
	if (h->raid_offload_debug > 0)
		dev_info(&h->pdev->dev,
			"Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			psa[0], psa[1], psa[2], psa[3],
			psa[4], psa[5], psa[6], psa[7]);
	rc = hpsa_do_reset(h, dev, psa, HPSA_RESET_TYPE_TARGET, reply_queue);
	if (rc != 0) {
		dev_warn(&h->pdev->dev,
			"Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			psa[0], psa[1], psa[2], psa[3],
			psa[4], psa[5], psa[6], psa[7]);
		return rc; /* failed to reset */
	}

	/* wait for device to recover */
	if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) {
		dev_warn(&h->pdev->dev,
			"Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			psa[0], psa[1], psa[2], psa[3],
			psa[4], psa[5], psa[6], psa[7]);
		return -1; /* failed to recover */
	}

	/* device recovered */
	dev_info(&h->pdev->dev,
		"Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		psa[0], psa[1], psa[2], psa[3],
		psa[4], psa[5], psa[6], psa[7]);

	return rc; /* success */
}
static int hpsa_send_abort_ioaccel2(struct ctlr_info *h,
	struct CommandList *abort, int reply_queue)
{
	int rc = IO_OK;
	struct CommandList *c;
	__le32 taglower, tagupper;
	struct hpsa_scsi_dev_t *dev;
	struct io_accel2_cmd *c2;

	dev = abort->scsi_cmd->device->hostdata;
	if (!dev->offload_enabled && !dev->hba_ioaccel_enabled)
		return -1;

	c = cmd_alloc(h);
	setup_ioaccel2_abort_cmd(c, h, abort, reply_queue);
	c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
	(void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
	hpsa_get_tag(h, abort, &taglower, &tagupper);
	dev_dbg(&h->pdev->dev,
		"%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n",
		__func__, tagupper, taglower);
	/* no unmap needed here because no data xfer. */

	dev_dbg(&h->pdev->dev,
		"%s: Tag:0x%08x:%08x: abort service response = 0x%02x.\n",
		__func__, tagupper, taglower, c2->error_data.serv_response);
	switch (c2->error_data.serv_response) {
	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
		rc = 0;
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
	case IOACCEL2_SERV_RESPONSE_FAILURE:
	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
		rc = -1;
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: Tag:0x%08x:%08x: unknown abort service response 0x%02x\n",
			__func__, tagupper, taglower,
			c2->error_data.serv_response);
		rc = -1;
	}
	cmd_free(h, c);
	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
		tagupper, taglower);
	return rc;
}
static int hpsa_send_abort_both_ways(struct ctlr_info *h,
	unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
{
	/*
	 * ioaccelerator mode 2 commands should be aborted via the
	 * accelerated path, since RAID path is unaware of these commands,
	 * but not all underlying firmware can handle abort TMF.
	 * Change abort to physical device reset when abort TMF is unsupported.
	 */
	if (abort->cmd_type == CMD_IOACCEL2) {
		if (HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags)
			return hpsa_send_abort_ioaccel2(h, abort,
						reply_queue);
		else
			return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr,
							abort, reply_queue);
	}
	return hpsa_send_abort(h, scsi3addr, abort, reply_queue);
}

/* Find out which reply queue a command was meant to return on */
static int hpsa_extract_reply_queue(struct ctlr_info *h,
					struct CommandList *c)
{
	if (c->cmd_type == CMD_IOACCEL2)
		return h->ioaccel2_cmd_pool[c->cmdindex].reply_queue;
	return c->Header.ReplyQueue;
}
/*
 * Limit concurrency of abort commands to prevent
 * over-subscription of commands
 */
static inline int wait_for_available_abort_cmd(struct ctlr_info *h)
{
#define ABORT_CMD_WAIT_MSECS 5000
	return !wait_event_timeout(h->abort_cmd_wait_queue,
			atomic_dec_if_positive(&h->abort_cmds_available) >= 0,
			msecs_to_jiffies(ABORT_CMD_WAIT_MSECS));
}
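/*
 * Caller pattern (sketch of how the abort handler pairs this with a
 * matching release; see hpsa_eh_abort_handler below):
 *
 *	if (wait_for_available_abort_cmd(h))
 *		return FAILED;	(timed out, nothing was decremented)
 *	rc = hpsa_send_abort_both_ways(h, scsi3addr, abort, reply_queue);
 *	atomic_inc(&h->abort_cmds_available);
 *	wake_up_all(&h->abort_cmd_wait_queue);
 *
 * A successful wait has already decremented abort_cmds_available, so
 * every path after the send must re-increment it.
 */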
/* Send an abort for the specified command.
 *	If the device and controller support it,
 *		send a task abort request.
 */
static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
{
	int rc;
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	struct CommandList *abort; /* pointer to command to be aborted */
	struct scsi_cmnd *as;	/* ptr to scsi cmd inside aborted command. */
	char msg[256];		/* For debug messaging. */
	int ml = 0;
	__le32 tagupper, taglower;
	int refcount, reply_queue;

	if (sc == NULL)
		return FAILED;

	if (sc->device == NULL)
		return FAILED;

	/* Find the controller of the command to be aborted */
	h = sdev_to_hba(sc->device);
	if (h == NULL)
		return FAILED;

	/* Find the device of the command to be aborted */
	dev = sc->device->hostdata;
	if (!dev) {
		dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
				__func__);
		return FAILED;
	}

	/* If controller locked up, we can guarantee command won't complete */
	if (lockup_detected(h)) {
		hpsa_show_dev_msg(KERN_WARNING, h, dev,
					"ABORT FAILED, lockup detected");
		return FAILED;
	}

	/* This is a good time to check if controller lockup has occurred */
	if (detect_controller_lockup(h)) {
		hpsa_show_dev_msg(KERN_WARNING, h, dev,
					"ABORT FAILED, new lockup detected");
		return FAILED;
	}

	/* Check that controller supports some kind of task abort */
	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
		!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
		return FAILED;

	memset(msg, 0, sizeof(msg));
	ml += sprintf(msg+ml, "scsi %d:%d:%d:%llu %s %p",
		h->scsi_host->host_no, sc->device->channel,
		sc->device->id, sc->device->lun,
		"Aborting command", sc);

	/* Get SCSI command to be aborted */
	abort = (struct CommandList *) sc->host_scribble;
	if (abort == NULL) {
		/* This can happen if the command already completed. */
		return SUCCESS;
	}
	refcount = atomic_inc_return(&abort->refcount);
	if (refcount == 1) { /* Command is done already. */
		cmd_free(h, abort);
		return SUCCESS;
	}

	/* Don't bother trying the abort if we know it won't work. */
	if (abort->cmd_type != CMD_IOACCEL2 &&
		abort->cmd_type != CMD_IOACCEL1 && !dev->supports_aborts) {
		cmd_free(h, abort);
		return FAILED;
	}

	/*
	 * Check that we're aborting the right command.
	 * It's possible the CommandList already completed and got re-used.
	 */
	if (abort->scsi_cmd != sc) {
		cmd_free(h, abort);
		return SUCCESS;
	}

	abort->abort_pending = true;
	hpsa_get_tag(h, abort, &taglower, &tagupper);
	reply_queue = hpsa_extract_reply_queue(h, abort);
	ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
	as = abort->scsi_cmd;
	if (as != NULL)
		ml += sprintf(msg+ml,
			"CDBLen: %d CDB: 0x%02x%02x... SN: 0x%lx ",
			as->cmd_len, as->cmnd[0], as->cmnd[1],
			as->serial_number);
	dev_warn(&h->pdev->dev, "%s BEING SENT\n", msg);
	hpsa_show_dev_msg(KERN_WARNING, h, dev, "Aborting command");

	/*
	 * Command is in flight, or possibly already completed
	 * by the firmware (but not to the scsi mid layer) but we can't
	 * distinguish which.  Send the abort down.
	 */
	if (wait_for_available_abort_cmd(h)) {
		dev_warn(&h->pdev->dev,
			"%s FAILED, timeout waiting for an abort command to become available.\n",
			msg);
		cmd_free(h, abort);
		return FAILED;
	}
	rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort, reply_queue);
	atomic_inc(&h->abort_cmds_available);
	wake_up_all(&h->abort_cmd_wait_queue);
	if (rc != 0) {
		dev_warn(&h->pdev->dev, "%s SENT, FAILED\n", msg);
		hpsa_show_dev_msg(KERN_WARNING, h, dev,
				"FAILED to abort command");
		cmd_free(h, abort);
		return FAILED;
	}
	dev_info(&h->pdev->dev, "%s SENT, SUCCESS\n", msg);
	wait_event(h->event_sync_wait_queue,
		   abort->scsi_cmd != sc || lockup_detected(h));
	cmd_free(h, abort);
	return !lockup_detected(h) ? SUCCESS : FAILED;
}
/*
 * For operations with an associated SCSI command, a command block is allocated
 * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the
 * block request tag as an index into a table of entries.  cmd_tagged_free() is
 * the complement, although cmd_free() may be called instead.
 */
static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
					    struct scsi_cmnd *scmd)
{
	int idx = hpsa_get_cmd_index(scmd);
	struct CommandList *c = h->cmd_pool + idx;

	if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
		dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
			idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
		/* The index value comes from the block layer, so if it's out of
		 * bounds, it's probably not our bug.
		 */
		BUG();
	}

	atomic_inc(&c->refcount);
	if (unlikely(!hpsa_is_cmd_idle(c))) {
		/*
		 * We expect that the SCSI layer will hand us a unique tag
		 * value.  Thus, there should never be a collision here between
		 * two requests...because if the selected command isn't idle
		 * then someone is going to be very disappointed.
		 */
		dev_err(&h->pdev->dev,
			"tag collision (tag=%d) in cmd_tagged_alloc().\n",
			idx);
		if (c->scsi_cmd != NULL)
			scsi_print_command(c->scsi_cmd);
		scsi_print_command(scmd);
	}

	hpsa_cmd_partial_init(h, idx, c);
	return c;
}
static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
{
	/*
	 * Release our reference to the block.  We don't need to do anything
	 * else to free it, because it is accessed by index.  (There's no point
	 * in checking the result of the decrement, since we cannot guarantee
	 * that there isn't a concurrent abort which is also accessing it.)
	 */
	(void)atomic_dec(&c->refcount);
}
/*
 * For operations that cannot sleep, a command block is allocated at init,
 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
 * which ones are free or in use.  Lock must be held when calling this.
 * cmd_free() is the complement.
 * This function never gives up and returns NULL.  If it hangs,
 * another thread must call cmd_free() to free some tags.
 */
static struct CommandList *cmd_alloc(struct ctlr_info *h)
{
	struct CommandList *c;
	int refcount, i;
	int offset = 0;

	/*
	 * There is some *extremely* small but non-zero chance that
	 * multiple threads could get in here, and one thread could
	 * be scanning through the list of bits looking for a free
	 * one, but the free ones are always behind him, and other
	 * threads sneak in behind him and eat them before he can
	 * get to them, so that while there is always a free one, a
	 * very unlucky thread might be starved anyway, never able to
	 * beat the other threads.  In reality, this happens so
	 * infrequently as to be indistinguishable from never.
	 *
	 * Note that we start allocating commands before the SCSI host structure
	 * is initialized.  Since the search starts at bit zero, this
	 * all works, since we have at least one command structure available;
	 * however, it means that the structures with the low indexes have to be
	 * reserved for driver-initiated requests, while requests from the block
	 * layer will use the higher indexes.
	 */

	for (;;) {
		i = find_next_zero_bit(h->cmd_pool_bits,
					HPSA_NRESERVED_CMDS,
					offset);
		if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
			offset = 0;
			continue;
		}
		c = h->cmd_pool + i;
		refcount = atomic_inc_return(&c->refcount);
		if (unlikely(refcount > 1)) {
			cmd_free(h, c); /* already in use */
			offset = (i + 1) % HPSA_NRESERVED_CMDS;
			continue;
		}
		set_bit(i & (BITS_PER_LONG - 1),
			h->cmd_pool_bits + (i / BITS_PER_LONG));
		break; /* it's ours now. */
	}
	hpsa_cmd_partial_init(h, i, c);
	return c;
}
/*
 * This is the complementary operation to cmd_alloc().  Note, however, in some
 * corner cases it may also be used to free blocks allocated by
 * cmd_tagged_alloc() in which case the ref-count decrement does the trick and
 * the clear-bit is harmless.
 */
static void cmd_free(struct ctlr_info *h, struct CommandList *c)
{
	if (atomic_dec_and_test(&c->refcount)) {
		int i;

		i = c - h->cmd_pool;
		clear_bit(i & (BITS_PER_LONG - 1),
			  h->cmd_pool_bits + (i / BITS_PER_LONG));
	}
}
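/*
 * A note on the bitmap arithmetic shared by cmd_alloc() and cmd_free():
 * command index i lives in word (i / BITS_PER_LONG) of h->cmd_pool_bits,
 * at bit position (i & (BITS_PER_LONG - 1)).  For example, with 64-bit
 * longs, command 70 is bit 6 of word 1.
 */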
#ifdef CONFIG_COMPAT

static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
	void __user *arg)
{
	IOCTL32_Command_struct __user *arg32 =
	    (IOCTL32_Command_struct __user *) arg;
	IOCTL_Command_struct arg64;
	IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	memset(&arg64, 0, sizeof(arg64));
	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
	if (err)
		return err;
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}
static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
	int cmd, void __user *arg)
{
	BIG_IOCTL32_Command_struct __user *arg32 =
	    (BIG_IOCTL32_Command_struct __user *) arg;
	BIG_IOCTL_Command_struct arg64;
	BIG_IOCTL_Command_struct __user *p =
	    compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	memset(&arg64, 0, sizeof(arg64));
	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(arg64.malloc_size, &arg32->malloc_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
	if (err)
		return err;
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
{
	switch (cmd) {
	case CCISS_GETPCIINFO:
	case CCISS_GETINTINFO:
	case CCISS_SETINTINFO:
	case CCISS_GETNODENAME:
	case CCISS_SETNODENAME:
	case CCISS_GETHEARTBEAT:
	case CCISS_GETBUSTYPES:
	case CCISS_GETFIRMVER:
	case CCISS_GETDRIVVER:
	case CCISS_REVALIDVOLS:
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
	case CCISS_RESCANDISK:
	case CCISS_GETLUNINFO:
		return hpsa_ioctl(dev, cmd, arg);

	case CCISS_PASSTHRU32:
		return hpsa_ioctl32_passthru(dev, cmd, arg);
	case CCISS_BIG_PASSTHRU32:
		return hpsa_ioctl32_big_passthru(dev, cmd, arg);

	default:
		return -ENOIOCTLCMD;
	}
}
#endif
static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
{
	struct hpsa_pci_info pciinfo;

	if (!argp)
		return -EINVAL;
	pciinfo.domain = pci_domain_nr(h->pdev->bus);
	pciinfo.bus = h->pdev->bus->number;
	pciinfo.dev_fn = h->pdev->devfn;
	pciinfo.board_id = h->board_id;
	if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
		return -EFAULT;
	return 0;
}
static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
{
	DriverVer_type DriverVer;
	unsigned char vmaj, vmin, vsubmin;
	int rc;

	rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
		&vmaj, &vmin, &vsubmin);
	if (rc != 3) {
		dev_info(&h->pdev->dev, "driver version string '%s' "
			"unrecognized.", HPSA_DRIVER_VERSION);
		vmaj = 0;
		vmin = 0;
		vsubmin = 0;
	}
	DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
	if (!argp)
		return -EINVAL;
	if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
		return -EFAULT;
	return 0;
}
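/*
 * Example of the version packing above: HPSA_DRIVER_VERSION "3.4.14-0"
 * scans as vmaj = 3, vmin = 4, vsubmin = 14, so
 * DriverVer = (3 << 16) | (4 << 8) | 14 = 0x0003040E.
 */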
static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
	IOCTL_Command_struct iocommand;
	struct CommandList *c;
	char *buff = NULL;
	u64 temp64;
	int rc = 0;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
		return -EFAULT;
	if ((iocommand.buf_size < 1) &&
	    (iocommand.Request.Type.Direction != XFER_NONE)) {
		return -EINVAL;
	}
	if (iocommand.buf_size > 0) {
		buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
		if (buff == NULL)
			return -ENOMEM;
		if (iocommand.Request.Type.Direction & XFER_WRITE) {
			/* Copy the data into the buffer we created */
			if (copy_from_user(buff, iocommand.buf,
				iocommand.buf_size)) {
				rc = -EFAULT;
				goto out_kfree;
			}
		} else {
			memset(buff, 0, iocommand.buf_size);
		}
	}
	c = cmd_alloc(h);

	/* Fill in the command type */
	c->cmd_type = CMD_IOCTL_PEND;
	c->scsi_cmd = SCSI_CMD_BUSY;
	/* Fill in Command Header */
	c->Header.ReplyQueue = 0; /* unused in simple mode */
	if (iocommand.buf_size > 0) {	/* buffer to fill */
		c->Header.SGList = 1;
		c->Header.SGTotal = cpu_to_le16(1);
	} else { /* no buffers to fill */
		c->Header.SGList = 0;
		c->Header.SGTotal = cpu_to_le16(0);
	}
	memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));

	/* Fill in Request block */
	memcpy(&c->Request, &iocommand.Request,
		sizeof(c->Request));

	/* Fill in the scatter gather information */
	if (iocommand.buf_size > 0) {
		temp64 = pci_map_single(h->pdev, buff,
			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
		if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
			c->SG[0].Addr = cpu_to_le64(0);
			c->SG[0].Len = cpu_to_le32(0);
			rc = -ENOMEM;
			goto out;
		}
		c->SG[0].Addr = cpu_to_le64(temp64);
		c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
		c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
	}
	rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
	if (iocommand.buf_size > 0)
		hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
	check_ioctl_unit_attention(h, c);
	if (rc) {
		rc = -EIO;
		goto out;
	}

	/* Copy the error information out */
	memcpy(&iocommand.error_info, c->err_info,
		sizeof(iocommand.error_info));
	if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
		rc = -EFAULT;
		goto out;
	}
	if ((iocommand.Request.Type.Direction & XFER_READ) &&
		iocommand.buf_size > 0) {
		/* Copy the data out of the buffer we created */
		if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
			rc = -EFAULT;
			goto out;
		}
	}
out:
	cmd_free(h, c);
out_kfree:
	kfree(buff);
	return rc;
}
static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
	BIG_IOCTL_Command_struct *ioc;
	struct CommandList *c;
	unsigned char **buff = NULL;
	int *buff_size = NULL;
	u64 temp64;
	BYTE sg_used = 0;
	int status = 0;
	int i;
	u32 left;
	u32 sz;
	BYTE __user *data_ptr;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	ioc = (BIG_IOCTL_Command_struct *)
	    kmalloc(sizeof(*ioc), GFP_KERNEL);
	if (!ioc) {
		status = -ENOMEM;
		goto cleanup1;
	}
	if (copy_from_user(ioc, argp, sizeof(*ioc))) {
		status = -EFAULT;
		goto cleanup1;
	}
	if ((ioc->buf_size < 1) &&
	    (ioc->Request.Type.Direction != XFER_NONE)) {
		status = -EINVAL;
		goto cleanup1;
	}
	/* Check kmalloc limits  using all SGs */
	if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
		status = -EINVAL;
		goto cleanup1;
	}
	if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
		status = -EINVAL;
		goto cleanup1;
	}
	buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
	if (!buff) {
		status = -ENOMEM;
		goto cleanup1;
	}
	buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
	if (!buff_size) {
		status = -ENOMEM;
		goto cleanup1;
	}
	left = ioc->buf_size;
	data_ptr = ioc->buf;
	while (left) {
		sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
		buff_size[sg_used] = sz;
		buff[sg_used] = kmalloc(sz, GFP_KERNEL);
		if (buff[sg_used] == NULL) {
			status = -ENOMEM;
			goto cleanup1;
		}
		if (ioc->Request.Type.Direction & XFER_WRITE) {
			if (copy_from_user(buff[sg_used], data_ptr, sz)) {
				status = -EFAULT;
				goto cleanup1;
			}
		} else
			memset(buff[sg_used], 0, sz);
		left -= sz;
		data_ptr += sz;
		sg_used++;
	}
	c = cmd_alloc(h);

	c->cmd_type = CMD_IOCTL_PEND;
	c->scsi_cmd = SCSI_CMD_BUSY;
	c->Header.ReplyQueue = 0;
	c->Header.SGList = (u8) sg_used;
	c->Header.SGTotal = cpu_to_le16(sg_used);
	memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
	memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
	if (ioc->buf_size > 0) {
		for (i = 0; i < sg_used; i++) {
			temp64 = pci_map_single(h->pdev, buff[i],
				    buff_size[i], PCI_DMA_BIDIRECTIONAL);
			if (dma_mapping_error(&h->pdev->dev,
							(dma_addr_t) temp64)) {
				c->SG[i].Addr = cpu_to_le64(0);
				c->SG[i].Len = cpu_to_le32(0);
				hpsa_pci_unmap(h->pdev, c, i,
					PCI_DMA_BIDIRECTIONAL);
				status = -ENOMEM;
				goto cleanup0;
			}
			c->SG[i].Addr = cpu_to_le64(temp64);
			c->SG[i].Len = cpu_to_le32(buff_size[i]);
			c->SG[i].Ext = cpu_to_le32(0);
		}
		c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
	}
	status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
	if (sg_used)
		hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
	check_ioctl_unit_attention(h, c);
	if (status) {
		status = -EIO;
		goto cleanup0;
	}

	/* Copy the error information out */
	memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
	if (copy_to_user(argp, ioc, sizeof(*ioc))) {
		status = -EFAULT;
		goto cleanup0;
	}
	if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
		/* Copy the data out of the buffer we created */
		BYTE __user *ptr = ioc->buf;

		for (i = 0; i < sg_used; i++) {
			if (copy_to_user(ptr, buff[i], buff_size[i])) {
				status = -EFAULT;
				goto cleanup0;
			}
			ptr += buff_size[i];
		}
	}
	status = 0;
cleanup0:
	cmd_free(h, c);
cleanup1:
	if (buff) {
		for (i = 0; i < sg_used; i++)
			kfree(buff[i]);
		kfree(buff);
	}
	kfree(buff_size);
	kfree(ioc);
	return status;
}
static void check_ioctl_unit_attention(struct ctlr_info *h,
					struct CommandList *c)
{
	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
			c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
		(void) check_for_unit_attention(h, c);
}
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
{
	struct ctlr_info *h;
	void __user *argp = (void __user *)arg;
	int rc;

	h = sdev_to_hba(dev);

	switch (cmd) {
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
		hpsa_scan_start(h->scsi_host);
		return 0;
	case CCISS_GETPCIINFO:
		return hpsa_getpciinfo_ioctl(h, argp);
	case CCISS_GETDRIVVER:
		return hpsa_getdrivver_ioctl(h, argp);
	case CCISS_PASSTHRU:
		if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
			return -EAGAIN;
		rc = hpsa_passthru_ioctl(h, argp);
		atomic_inc(&h->passthru_cmds_avail);
		return rc;
	case CCISS_BIG_PASSTHRU:
		if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
			return -EAGAIN;
		rc = hpsa_big_passthru_ioctl(h, argp);
		atomic_inc(&h->passthru_cmds_avail);
		return rc;
	default:
		return -ENOTTY;
	}
}
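/*
 * Note on the CCISS_PASSTHRU/CCISS_BIG_PASSTHRU arms above: concurrent
 * passthru ioctls are throttled by h->passthru_cmds_avail.
 * atomic_dec_if_positive() consumes a slot (returning -EAGAIN to the
 * caller when none are left) and atomic_inc() returns the slot once the
 * command completes.
 */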
static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
				u8 reset_type)
{
	struct CommandList *c;

	c = cmd_alloc(h);

	/* fill_cmd can't fail here, no data buffer to map */
	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
		RAID_CTLR_LUNID, TYPE_MSG);
	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
	c->waiting = NULL;
	enqueue_cmd_and_start_io(h, c);
	/* Don't wait for completion, the reset won't complete.  Don't free
	 * the command either.  This is the last command we will send before
	 * re-initializing everything, so it doesn't matter and won't leak.
	 */
}
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type)
{
	int pci_dir = XFER_NONE;
	u64 tag; /* for commands to be aborted */

	c->cmd_type = CMD_IOCTL_PEND;
	c->scsi_cmd = SCSI_CMD_BUSY;
	c->Header.ReplyQueue = 0;
	if (buff != NULL && size > 0) {
		c->Header.SGList = 1;
		c->Header.SGTotal = cpu_to_le16(1);
	} else {
		c->Header.SGList = 0;
		c->Header.SGTotal = cpu_to_le16(0);
	}
	memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);

	if (cmd_type == TYPE_CMD) {
		switch (cmd) {
		case HPSA_INQUIRY:
			/* are we trying to read a vital product page */
			if (page_code & VPD_PAGE) {
				c->Request.CDB[1] = 0x01;
				c->Request.CDB[2] = (page_code & 0xff);
			}
			c->Request.CDBLen = 6;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = HPSA_INQUIRY;
			c->Request.CDB[4] = size & 0xFF;
			break;
		case HPSA_REPORT_LOG:
		case HPSA_REPORT_PHYS:
			/* Talking to controller so it's a physical command
			   mode = 00 target = 0.  Nothing to write.
			 */
			c->Request.CDBLen = 12;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			c->Request.CDB[9] = size & 0xFF;
			break;
		case BMIC_SENSE_DIAG_OPTIONS:
			c->Request.CDBLen = 16;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			/* Spec says this should be BMIC_WRITE */
			c->Request.CDB[0] = BMIC_READ;
			c->Request.CDB[6] = BMIC_SENSE_DIAG_OPTIONS;
			break;
		case BMIC_SET_DIAG_OPTIONS:
			c->Request.CDBLen = 16;
			c->Request.type_attr_dir =
					TYPE_ATTR_DIR(cmd_type,
						ATTR_SIMPLE, XFER_WRITE);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_WRITE;
			c->Request.CDB[6] = BMIC_SET_DIAG_OPTIONS;
			break;
		case HPSA_CACHE_FLUSH:
			c->Request.CDBLen = 12;
			c->Request.type_attr_dir =
					TYPE_ATTR_DIR(cmd_type,
						ATTR_SIMPLE, XFER_WRITE);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_WRITE;
			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
			c->Request.CDB[7] = (size >> 8) & 0xFF;
			c->Request.CDB[8] = size & 0xFF;
			break;
		case TEST_UNIT_READY:
			c->Request.CDBLen = 6;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
			c->Request.Timeout = 0;
			break;
		case HPSA_GET_RAID_MAP:
			c->Request.CDBLen = 12;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = HPSA_CISS_READ;
			c->Request.CDB[1] = cmd;
			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			c->Request.CDB[9] = size & 0xFF;
			break;
		case BMIC_SENSE_CONTROLLER_PARAMETERS:
			c->Request.CDBLen = 10;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_READ;
			c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			break;
		case BMIC_IDENTIFY_PHYSICAL_DEVICE:
			c->Request.CDBLen = 10;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_READ;
			c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			break;
		case BMIC_SENSE_SUBSYSTEM_INFORMATION:
			c->Request.CDBLen = 10;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_READ;
			c->Request.CDB[6] = BMIC_SENSE_SUBSYSTEM_INFORMATION;
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			break;
		case BMIC_SENSE_STORAGE_BOX_PARAMS:
			c->Request.CDBLen = 10;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_READ;
			c->Request.CDB[6] = BMIC_SENSE_STORAGE_BOX_PARAMS;
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			break;
		case BMIC_IDENTIFY_CONTROLLER:
			c->Request.CDBLen = 10;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_READ;
			c->Request.CDB[1] = 0;
			c->Request.CDB[2] = 0;
			c->Request.CDB[3] = 0;
			c->Request.CDB[4] = 0;
			c->Request.CDB[5] = 0;
			c->Request.CDB[6] = BMIC_IDENTIFY_CONTROLLER;
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			c->Request.CDB[9] = 0;
			break;
		default:
			dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
			BUG();
		}
	} else if (cmd_type == TYPE_MSG) {
		switch (cmd) {
		case HPSA_PHYS_TARGET_RESET:
			c->Request.CDBLen = 16;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
			c->Request.Timeout = 0; /* Don't time out */
			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
			c->Request.CDB[0] = HPSA_RESET;
			c->Request.CDB[1] = HPSA_TARGET_RESET_TYPE;
			/* Physical target reset needs no control bytes 4-7*/
			c->Request.CDB[4] = 0x00;
			c->Request.CDB[5] = 0x00;
			c->Request.CDB[6] = 0x00;
			c->Request.CDB[7] = 0x00;
			break;
		case HPSA_DEVICE_RESET_MSG:
			c->Request.CDBLen = 16;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
			c->Request.Timeout = 0; /* Don't time out */
			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
			c->Request.CDB[0] = cmd;
			c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
			/* If bytes 4-7 are zero, it means reset the */
			/* LunID device */
			c->Request.CDB[4] = 0x00;
			c->Request.CDB[5] = 0x00;
			c->Request.CDB[6] = 0x00;
			c->Request.CDB[7] = 0x00;
			break;
		case HPSA_ABORT_MSG:
			memcpy(&tag, buff, sizeof(tag));
			dev_dbg(&h->pdev->dev,
				"Abort Tag:0x%016llx using rqst Tag:0x%016llx",
				tag, c->Header.tag);
			c->Request.CDBLen = 16;
			c->Request.type_attr_dir =
					TYPE_ATTR_DIR(cmd_type,
						ATTR_SIMPLE, XFER_WRITE);
			c->Request.Timeout = 0; /* Don't time out */
			c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
			c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
			c->Request.CDB[2] = 0x00; /* reserved */
			c->Request.CDB[3] = 0x00; /* reserved */
			/* Tag to abort goes in CDB[4]-CDB[11] */
			memcpy(&c->Request.CDB[4], &tag, sizeof(tag));
			c->Request.CDB[12] = 0x00; /* reserved */
			c->Request.CDB[13] = 0x00; /* reserved */
			c->Request.CDB[14] = 0x00; /* reserved */
			c->Request.CDB[15] = 0x00; /* reserved */
			break;
		default:
			dev_warn(&h->pdev->dev, "unknown message type %d\n",
				cmd);
			BUG();
		}
	} else {
		dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
		BUG();
	}

	switch (GET_DIR(c->Request.type_attr_dir)) {
	case XFER_READ:
		pci_dir = PCI_DMA_FROMDEVICE;
		break;
	case XFER_WRITE:
		pci_dir = PCI_DMA_TODEVICE;
		break;
	case XFER_NONE:
		pci_dir = PCI_DMA_NONE;
		break;
	default:
		pci_dir = PCI_DMA_BIDIRECTIONAL;
	}
	if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
		return -1;
	return 0;
}
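/*
 * The size encodings in fill_cmd() follow the CISS convention of
 * big-endian (MSB-first) byte counts in the CDB.  For example, a
 * HPSA_REPORT_LOG with a 0x1234-byte buffer is encoded as
 * CDB[6..9] = 00 00 12 34.
 */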
/*
 * Map (physical) PCI mem into (virtual) kernel space
 */
static void __iomem *remap_pci_mem(ulong base, ulong size)
{
	ulong page_base = ((ulong) base) & PAGE_MASK;
	ulong page_offs = ((ulong) base) - page_base;
	void __iomem *page_remapped = ioremap_nocache(page_base,
		page_offs + size);

	return page_remapped ? (page_remapped + page_offs) : NULL;
}
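/*
 * Example: with 4 KiB pages, base 0x12345 gives page_base 0x12000 and
 * page_offs 0x345; ioremap_nocache() maps page_offs + size bytes starting
 * at the page boundary, and the caller gets back page_remapped + 0x345.
 */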
static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
{
	return h->access.command_completed(h, q);
}

static inline bool interrupt_pending(struct ctlr_info *h)
{
	return h->access.intr_pending(h);
}

static inline long interrupt_not_for_us(struct ctlr_info *h)
{
	return (h->access.intr_pending(h) == 0) ||
		(h->interrupts_enabled == 0);
}

static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
	u32 raw_tag)
{
	if (unlikely(tag_index >= h->nr_cmds)) {
		dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
		return 1;
	}
	return 0;
}
static inline void finish_cmd(struct CommandList *c)
{
	dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
	if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
			|| c->cmd_type == CMD_IOACCEL2))
		complete_scsi_command(c);
	else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF)
		complete(c->waiting);
}
/* process completion of an indexed ("direct lookup") command */
static inline void process_indexed_cmd(struct ctlr_info *h,
	u32 raw_tag)
{
	u32 tag_index;
	struct CommandList *c;

	tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
	if (!bad_tag(h, tag_index, raw_tag)) {
		c = h->cmd_pool + tag_index;
		finish_cmd(c);
	}
}
/* Some controllers, like p400, will give us one interrupt
 * after a soft reset, even if we turned interrupts off.
 * Only need to check for this in the hpsa_xxx_discard_completions
 * functions.
 */
static int ignore_bogus_interrupt(struct ctlr_info *h)
{
	if (likely(!reset_devices))
		return 0;

	if (likely(h->interrupts_enabled))
		return 0;

	dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
		"(known firmware bug.)  Ignoring.\n");

	return 1;
}
/*
 * Convert &h->q[x] (passed to interrupt handlers) back to h.
 * Relies on (h->q[x] == x) being true for x such that
 * 0 <= x < MAX_REPLY_QUEUES.
 */
static struct ctlr_info *queue_to_hba(u8 *queue)
{
	return container_of((queue - *queue), struct ctlr_info, q[0]);
}
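/*
 * Illustration: hpsa_request_irqs() initializes h->q[i] = i, so for
 * queue == &h->q[3], *queue == 3 and (queue - *queue) == &h->q[0];
 * container_of() then recovers the enclosing ctlr_info from the address
 * of its q[0] member.
 */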
static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba(queue);
	u8 q = *(u8 *) queue;
	u32 raw_tag;

	if (ignore_bogus_interrupt(h))
		return IRQ_NONE;

	if (interrupt_not_for_us(h))
		return IRQ_NONE;
	h->last_intr_timestamp = get_jiffies_64();
	while (interrupt_pending(h)) {
		raw_tag = get_next_completion(h, q);
		while (raw_tag != FIFO_EMPTY)
			raw_tag = next_command(h, q);
	}
	return IRQ_HANDLED;
}
static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba(queue);
	u32 raw_tag;
	u8 q = *(u8 *) queue;

	if (ignore_bogus_interrupt(h))
		return IRQ_NONE;

	h->last_intr_timestamp = get_jiffies_64();
	raw_tag = get_next_completion(h, q);
	while (raw_tag != FIFO_EMPTY)
		raw_tag = next_command(h, q);
	return IRQ_HANDLED;
}
static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba((u8 *) queue);
	u32 raw_tag;
	u8 q = *(u8 *) queue;

	if (interrupt_not_for_us(h))
		return IRQ_NONE;
	h->last_intr_timestamp = get_jiffies_64();
	while (interrupt_pending(h)) {
		raw_tag = get_next_completion(h, q);
		while (raw_tag != FIFO_EMPTY) {
			process_indexed_cmd(h, raw_tag);
			raw_tag = next_command(h, q);
		}
	}
	return IRQ_HANDLED;
}
static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba(queue);
	u32 raw_tag;
	u8 q = *(u8 *) queue;

	h->last_intr_timestamp = get_jiffies_64();
	raw_tag = get_next_completion(h, q);
	while (raw_tag != FIFO_EMPTY) {
		process_indexed_cmd(h, raw_tag);
		raw_tag = next_command(h, q);
	}
	return IRQ_HANDLED;
}
/* Send a message CDB to the firmware. Careful, this only works
 * in simple mode, not performant mode due to the tag lookup.
 * We only ever use this immediately after a controller reset.
 */
static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
			unsigned char type)
{
	struct Command {
		struct CommandListHeader CommandHeader;
		struct RequestBlock Request;
		struct ErrDescriptor ErrorDescriptor;
	};
	struct Command *cmd;
	static const size_t cmd_sz = sizeof(*cmd) +
					sizeof(cmd->ErrorDescriptor);
	dma_addr_t paddr64;
	__le32 paddr32;
	u32 tag;
	void __iomem *vaddr;
	int i, err;

	vaddr = pci_ioremap_bar(pdev, 0);
	if (vaddr == NULL)
		return -ENOMEM;

	/* The Inbound Post Queue only accepts 32-bit physical addresses for the
	 * CCISS commands, so they must be allocated from the lower 4GiB of
	 * memory.
	 */
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		iounmap(vaddr);
		return err;
	}

	cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
	if (cmd == NULL) {
		iounmap(vaddr);
		return -ENOMEM;
	}

	/* This must fit, because of the 32-bit consistent DMA mask.  Also,
	 * although there's no guarantee, we assume that the address is at
	 * least 4-byte aligned (most likely, it's page-aligned).
	 */
	paddr32 = cpu_to_le32(paddr64);

	cmd->CommandHeader.ReplyQueue = 0;
	cmd->CommandHeader.SGList = 0;
	cmd->CommandHeader.SGTotal = cpu_to_le16(0);
	cmd->CommandHeader.tag = cpu_to_le64(paddr64);
	memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);

	cmd->Request.CDBLen = 16;
	cmd->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
	cmd->Request.Timeout = 0; /* Don't time out */
	cmd->Request.CDB[0] = opcode;
	cmd->Request.CDB[1] = type;
	memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
	cmd->ErrorDescriptor.Addr =
			cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
	cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));

	writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);

	for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
		tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
		if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
			break;
		msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
	}

	iounmap(vaddr);

	/* we leak the DMA buffer here ... no choice since the controller could
	 *  still complete the command.
	 */
	if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
		dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
			opcode, type);
		return -ETIMEDOUT;
	}

	pci_free_consistent(pdev, cmd_sz, cmd, paddr64);

	if (tag & HPSA_ERROR_BIT) {
		dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
			opcode, type);
		return -EIO;
	}

	dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
		opcode, type);
	return 0;
}

#define hpsa_noop(p) hpsa_message(p, 3, 0)
static int hpsa_controller_hard_reset(struct pci_dev *pdev,
	void __iomem *vaddr, u32 use_doorbell)
{

	if (use_doorbell) {
		/* For everything after the P600, the PCI power state method
		 * of resetting the controller doesn't work, so we have this
		 * other way using the doorbell register.
		 */
		dev_info(&pdev->dev, "using doorbell to reset controller\n");
		writel(use_doorbell, vaddr + SA5_DOORBELL);

		/* PMC hardware guys tell us we need a 10 second delay after
		 * doorbell reset and before any attempt to talk to the board
		 * at all to ensure that this actually works and doesn't fall
		 * over in some weird corner cases.
		 */
		msleep(10000);
	} else { /* Try to do it the PCI power state way */

		/* Quoting from the Open CISS Specification: "The Power
		 * Management Control/Status Register (CSR) controls the power
		 * state of the device.  The normal operating state is D0,
		 * CSR=00h.  The software off state is D3, CSR=03h.  To reset
		 * the controller, place the interface device in D3 then to D0,
		 * this causes a secondary PCI reset which will reset the
		 * controller."
		 */

		int rc = 0;

		dev_info(&pdev->dev, "using PCI PM to reset controller\n");

		/* enter the D3hot power management state */
		rc = pci_set_power_state(pdev, PCI_D3hot);
		if (rc)
			return rc;

		msleep(500);

		/* enter the D0 power management state */
		rc = pci_set_power_state(pdev, PCI_D0);
		if (rc)
			return rc;

		/*
		 * The P600 requires a small delay when changing states.
		 * Otherwise we may think the board did not reset and we bail.
		 * This for kdump only and is particular to the P600.
		 */
		msleep(500);
	}
	return 0;
}
static void init_driver_version(char *driver_version, int len)
{
	memset(driver_version, 0, len);
	strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
}
static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
{
	char *driver_version;
	int i, size = sizeof(cfgtable->driver_version);

	driver_version = kmalloc(size, GFP_KERNEL);
	if (!driver_version)
		return -ENOMEM;

	init_driver_version(driver_version, size);
	for (i = 0; i < size; i++)
		writeb(driver_version[i], &cfgtable->driver_version[i]);
	kfree(driver_version);
	return 0;
}
static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
					  unsigned char *driver_ver)
{
	int i;

	for (i = 0; i < sizeof(cfgtable->driver_version); i++)
		driver_ver[i] = readb(&cfgtable->driver_version[i]);
}
static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
{
	char *driver_ver, *old_driver_ver;
	int rc, size = sizeof(cfgtable->driver_version);

	old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
	if (!old_driver_ver)
		return -ENOMEM;
	driver_ver = old_driver_ver + size;

	/* After a reset, the 32 bytes of "driver version" in the cfgtable
	 * should have been changed, otherwise we know the reset failed.
	 */
	init_driver_version(old_driver_ver, size);
	read_driver_ver_from_cfgtable(cfgtable, driver_ver);
	rc = !memcmp(driver_ver, old_driver_ver, size);
	kfree(old_driver_ver);
	return rc;
}
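/*
 * In other words: the driver writes a known version string into the
 * cfgtable before resetting; firmware coming up from a real reset
 * overwrites that field, so a byte-for-byte match afterwards means the
 * controller never actually went through reset and the nonzero return
 * reports the reset as failed.
 */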
/* This does a hard reset of the controller using PCI power management
 * states or the using the doorbell register.
 */
static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	void __iomem *vaddr;
	unsigned long paddr;
	u32 misc_fw_support;
	int rc;
	struct CfgTable __iomem *cfgtable;
	u32 use_doorbell;
	u16 command_register;

	/* For controllers as old as the P600, this is very nearly
	 * the same thing as
	 *
	 * pci_save_state(pci_dev);
	 * pci_set_power_state(pci_dev, PCI_D3hot);
	 * pci_set_power_state(pci_dev, PCI_D0);
	 * pci_restore_state(pci_dev);
	 *
	 * For controllers newer than the P600, the pci power state
	 * method of resetting doesn't work so we have another way
	 * using the doorbell register.
	 */

	if (!ctlr_is_resettable(board_id)) {
		dev_warn(&pdev->dev, "Controller not resettable\n");
		return -ENODEV;
	}

	/* if controller is soft- but not hard resettable... */
	if (!ctlr_is_hard_resettable(board_id))
		return -ENOTSUPP; /* try soft reset later. */

	/* Save the PCI command register */
	pci_read_config_word(pdev, 4, &command_register);
	pci_save_state(pdev);

	/* find the first memory BAR, so we can find the cfg table */
	rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
	if (rc)
		return rc;
	vaddr = remap_pci_mem(paddr, 0x250);
	if (!vaddr)
		return -ENOMEM;

	/* find cfgtable in order to check if reset via doorbell is supported */
	rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
					&cfg_base_addr_index, &cfg_offset);
	if (rc)
		goto unmap_vaddr;
	cfgtable = remap_pci_mem(pci_resource_start(pdev,
		       cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
	if (!cfgtable) {
		rc = -ENOMEM;
		goto unmap_vaddr;
	}
	rc = write_driver_ver_to_cfgtable(cfgtable);
	if (rc)
		goto unmap_cfgtable;

	/* If reset via doorbell register is supported, use that.
	 * There are two such methods.  Favor the newest method.
	 */
	misc_fw_support = readl(&cfgtable->misc_fw_support);
	use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
	if (use_doorbell) {
		use_doorbell = DOORBELL_CTLR_RESET2;
	} else {
		use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
		if (use_doorbell) {
			dev_warn(&pdev->dev,
				"Soft reset not supported. Firmware update is required.\n");
			rc = -ENOTSUPP; /* try soft reset */
			goto unmap_cfgtable;
		}
	}

	rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
	if (rc)
		goto unmap_cfgtable;

	pci_restore_state(pdev);
	pci_write_config_word(pdev, 4, command_register);

	/* Some devices (notably the HP Smart Array 5i Controller)
	   need a little pause here */
	msleep(HPSA_POST_RESET_PAUSE_MSECS);

	rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
	if (rc) {
		dev_warn(&pdev->dev,
			"Failed waiting for board to become ready after hard reset\n");
		goto unmap_cfgtable;
	}

	rc = controller_reset_failed(vaddr + cfg_offset);
	if (rc < 0)
		goto unmap_cfgtable;
	if (rc) {
		dev_warn(&pdev->dev, "Unable to successfully reset "
			"controller. Will try soft reset.\n");
		rc = -ENOTSUPP;
	} else {
		dev_info(&pdev->dev, "board ready after hard reset.\n");
	}

unmap_cfgtable:
	iounmap(cfgtable);

unmap_vaddr:
	iounmap(vaddr);
	return rc;
}
/* We cannot read the structure directly, for portability we must use
 *   the io functions.
 *   This is for debug only.
 */
static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
{
#ifdef HPSA_DEBUG
	int i;
	char temp_name[17];

	dev_info(dev, "Controller Configuration information\n");
	dev_info(dev, "------------------------------------\n");
	for (i = 0; i < 4; i++)
		temp_name[i] = readb(&(tb->Signature[i]));
	temp_name[4] = '\0';
	dev_info(dev, "   Signature = %s\n", temp_name);
	dev_info(dev, "   Spec Number = %d\n", readl(&(tb->SpecValence)));
	dev_info(dev, "   Transport methods supported = 0x%x\n",
	       readl(&(tb->TransportSupport)));
	dev_info(dev, "   Transport methods active = 0x%x\n",
	       readl(&(tb->TransportActive)));
	dev_info(dev, "   Requested transport Method = 0x%x\n",
	       readl(&(tb->HostWrite.TransportRequest)));
	dev_info(dev, "   Coalesce Interrupt Delay = 0x%x\n",
	       readl(&(tb->HostWrite.CoalIntDelay)));
	dev_info(dev, "   Coalesce Interrupt Count = 0x%x\n",
	       readl(&(tb->HostWrite.CoalIntCount)));
	dev_info(dev, "   Max outstanding commands = %d\n",
	       readl(&(tb->CmdsOutMax)));
	dev_info(dev, "   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
	for (i = 0; i < 16; i++)
		temp_name[i] = readb(&(tb->ServerName[i]));
	temp_name[16] = '\0';
	dev_info(dev, "   Server Name = %s\n", temp_name);
	dev_info(dev, "   Heartbeat Counter = 0x%x\n\n\n",
		readl(&(tb->HeartBeat)));
#endif				/* HPSA_DEBUG */
}
static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
{
	int i, offset, mem_type, bar_type;

	if (pci_bar_addr == PCI_BASE_ADDRESS_0)	/* looking for BAR zero? */
		return 0;
	offset = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
		if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
			offset += 4;
		else {
			mem_type = pci_resource_flags(pdev, i) &
			    PCI_BASE_ADDRESS_MEM_TYPE_MASK;
			switch (mem_type) {
			case PCI_BASE_ADDRESS_MEM_TYPE_32:
			case PCI_BASE_ADDRESS_MEM_TYPE_1M:
				offset += 4;	/* 32 bit */
				break;
			case PCI_BASE_ADDRESS_MEM_TYPE_64:
				offset += 8;
				break;
			default:	/* reserved in PCI 2.2 */
				dev_warn(&pdev->dev,
				       "base address is invalid\n");
				return -1;
			}
		}
		if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
			return i + 1;
	}
	return -1;
}
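/*
 * The offset accounting above mirrors PCI config space layout: an I/O or
 * 32-bit memory BAR occupies 4 bytes of config space and a 64-bit memory
 * BAR occupies 8, so the running offset can be compared against
 * (pci_bar_addr - PCI_BASE_ADDRESS_0) to find the matching resource index.
 */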
static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
{
	if (h->msix_vector) {
		if (h->pdev->msix_enabled)
			pci_disable_msix(h->pdev);
		h->msix_vector = 0;
	} else if (h->msi_vector) {
		if (h->pdev->msi_enabled)
			pci_disable_msi(h->pdev);
		h->msi_vector = 0;
	}
}
/* If MSI/MSI-X is supported by the kernel we will try to enable it on
 * controllers that are capable. If not, we use legacy INTx mode.
 */
static void hpsa_interrupt_mode(struct ctlr_info *h)
{
#ifdef CONFIG_PCI_MSI
	int err, i;
	struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];

	for (i = 0; i < MAX_REPLY_QUEUES; i++) {
		hpsa_msix_entries[i].vector = 0;
		hpsa_msix_entries[i].entry = i;
	}

	/* Some boards advertise MSI but don't really support it */
	if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
	    (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
		goto default_int_mode;
	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
		dev_info(&h->pdev->dev, "MSI-X capable controller\n");
		h->msix_vector = MAX_REPLY_QUEUES;
		if (h->msix_vector > num_online_cpus())
			h->msix_vector = num_online_cpus();
		err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
					    1, h->msix_vector);
		if (err < 0) {
			dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
			h->msix_vector = 0;
			goto single_msi_mode;
		} else if (err < h->msix_vector) {
			dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
			       "available\n", err);
		}
		h->msix_vector = err;
		for (i = 0; i < h->msix_vector; i++)
			h->intr[i] = hpsa_msix_entries[i].vector;
		return;
	}
single_msi_mode:
	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
		dev_info(&h->pdev->dev, "MSI capable controller\n");
		if (!pci_enable_msi(h->pdev))
			h->msi_vector = 1;
		else
			dev_warn(&h->pdev->dev, "MSI init failed\n");
	}
default_int_mode:
#endif				/* CONFIG_PCI_MSI */
	/* if we get here we're going to use the default interrupt mode */
	h->intr[h->intr_mode] = h->pdev->irq;
}
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
{
	int i;
	u32 subsystem_vendor_id, subsystem_device_id;

	subsystem_vendor_id = pdev->subsystem_vendor;
	subsystem_device_id = pdev->subsystem_device;
	*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
		    subsystem_vendor_id;

	for (i = 0; i < ARRAY_SIZE(products); i++)
		if (*board_id == products[i].board_id)
			return i;

	if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
		subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
		!hpsa_allow_any) {
		dev_warn(&pdev->dev, "unrecognized board ID: "
			"0x%08x, ignoring.\n", *board_id);
		return -ENODEV;
	}
	return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
}
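/*
 * Example of the board_id composition above: subsystem vendor 0x103C with
 * subsystem device 0x3241 yields *board_id = (0x3241 << 16) | 0x103C =
 * 0x3241103C, which is then matched against the products[] table.
 */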
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
				    unsigned long *memory_bar)
{
	int i;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			/* addressing mode bits already removed */
			*memory_bar = pci_resource_start(pdev, i);
			dev_dbg(&pdev->dev, "memory BAR = %lx\n",
				*memory_bar);
			return 0;
		}
	dev_warn(&pdev->dev, "no memory BAR found\n");
	return -ENODEV;
}
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
				     int wait_for_ready)
{
	int i, iterations;
	u32 scratchpad;

	if (wait_for_ready)
		iterations = HPSA_BOARD_READY_ITERATIONS;
	else
		iterations = HPSA_BOARD_NOT_READY_ITERATIONS;

	for (i = 0; i < iterations; i++) {
		scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
		if (wait_for_ready) {
			if (scratchpad == HPSA_FIRMWARE_READY)
				return 0;
		} else {
			if (scratchpad != HPSA_FIRMWARE_READY)
				return 0;
		}
		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
	}
	dev_warn(&pdev->dev, "board not ready, timed out.\n");
	return -ENODEV;
}
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
			       u32 *cfg_base_addr, u64 *cfg_base_addr_index,
			       u64 *cfg_offset)
{
	*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
	*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
	*cfg_base_addr &= (u32) 0x0000ffff;
	*cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
	if (*cfg_base_addr_index == -1) {
		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
		return -ENODEV;
	}
	return 0;
}
static void hpsa_free_cfgtables(struct ctlr_info *h)
{
	if (h->transtable) {
		iounmap(h->transtable);
		h->transtable = NULL;
	}
	if (h->cfgtable) {
		iounmap(h->cfgtable);
		h->cfgtable = NULL;
	}
}
/* Find and map CISS config table and transfer table
 * several items must be unmapped (freed) later
 */
static int hpsa_find_cfgtables(struct ctlr_info *h)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	u32 trans_offset;
	int rc;

	rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
		&cfg_base_addr_index, &cfg_offset);
	if (rc)
		return rc;
	h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
		       cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
	if (!h->cfgtable) {
		dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
		return -ENOMEM;
	}
	rc = write_driver_ver_to_cfgtable(h->cfgtable);
	if (rc)
		return rc;
	/* Find performant mode table. */
	trans_offset = readl(&h->cfgtable->TransMethodOffset);
	h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
				cfg_base_addr_index)+cfg_offset+trans_offset,
				sizeof(*h->transtable));
	if (!h->transtable) {
		dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
		hpsa_free_cfgtables(h);
		return -ENOMEM;
	}
	return 0;
}
static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
{
#define MIN_MAX_COMMANDS 16
	BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);

	h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);

	/* Limit commands in memory limited kdump scenario. */
	if (reset_devices && h->max_commands > 32)
		h->max_commands = 32;

	if (h->max_commands < MIN_MAX_COMMANDS) {
		dev_warn(&h->pdev->dev,
			"Controller reports max supported commands of %d. Using %d instead. Ensure that firmware is up to date.\n",
			h->max_commands,
			MIN_MAX_COMMANDS);
		h->max_commands = MIN_MAX_COMMANDS;
	}
}
/* If the controller reports that the total max sg entries is greater than 512,
 * then we know that chained SG blocks work.  (Original smart arrays did not
 * support chained SG blocks and would return zero for max sg entries.)
 */
static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
{
	return h->maxsgentries > 512;
}
/* Interrogate the hardware for some limits:
 * max commands, max SG elements without chaining, and with chaining,
 * SG chain block size, etc.
 */
static void hpsa_find_board_params(struct ctlr_info *h)
{
	hpsa_get_max_perf_mode_cmds(h);
	h->nr_cmds = h->max_commands;
	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
	h->fw_support = readl(&(h->cfgtable->misc_fw_support));
	if (hpsa_supports_chained_sg_blocks(h)) {
		/* Limit in-command s/g elements to 32 to save dma'able memory. */
		h->max_cmd_sg_entries = 32;
		h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
		h->maxsgentries--; /* save one for chain pointer */
	} else {
		/*
		 * Original smart arrays supported at most 31 s/g entries
		 * embedded inline in the command (trying to use more
		 * would lock up the controller)
		 */
		h->max_cmd_sg_entries = 31;
		h->maxsgentries = 31; /* default to traditional values */
		h->chainsize = 0;
	}

	/* Find out what task management functions are supported and cache */
	h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
		dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
	if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
		dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
	if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
		dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
}
static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
{
	if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
		dev_err(&h->pdev->dev, "not a valid CISS config table\n");
		return false;
	}
	return true;
}
static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
{
	u32 driver_support;

	driver_support = readl(&(h->cfgtable->driver_support));
	/* Need to enable prefetch in the SCSI core for 6400 in x86 */
#ifdef CONFIG_X86
	driver_support |= ENABLE_SCSI_PREFETCH;
#endif
	driver_support |= ENABLE_UNIT_ATTN;
	writel(driver_support, &(h->cfgtable->driver_support));
}
/* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
 * in a prefetch beyond physical memory.
 */
static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
{
	u32 dma_prefetch;

	if (h->board_id != 0x3225103C)
		return;
	dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
	dma_prefetch |= 0x8000;
	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
}
static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
{
	int i;
	u32 doorbell_value;
	unsigned long flags;

	/* wait until the clear_event_notify bit 6 is cleared by controller. */
	for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
		spin_lock_irqsave(&h->lock, flags);
		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
		spin_unlock_irqrestore(&h->lock, flags);
		if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
			goto done;
		/* delay and try again */
		msleep(CLEAR_EVENT_WAIT_INTERVAL);
	}
	return -ENODEV;
done:
	return 0;
}
static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
{
	int i;
	u32 doorbell_value;
	unsigned long flags;

	/* under certain very rare conditions, this can take awhile.
	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
	 * as we enter this code.)
	 */
	for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
		if (h->remove_in_progress)
			goto done;
		spin_lock_irqsave(&h->lock, flags);
		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
		spin_unlock_irqrestore(&h->lock, flags);
		if (!(doorbell_value & CFGTBL_ChangeReq))
			goto done;
		/* delay and try again */
		msleep(MODE_CHANGE_WAIT_INTERVAL);
	}
	return -ENODEV;
done:
	return 0;
}
/* return -ENODEV or other reason on error, 0 on success */
static int hpsa_enter_simple_mode(struct ctlr_info *h)
{
	u32 trans_support;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & SIMPLE_MODE))
		return -ENOTSUPP;

	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));

	/* Update the field, and then ring the doorbell */
	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	if (hpsa_wait_for_mode_change_ack(h))
		goto error;
	print_cfg_table(&h->pdev->dev, h->cfgtable);
	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
		goto error;
	h->transMethod = CFGTBL_Trans_Simple;
	return 0;
error:
	dev_err(&h->pdev->dev, "failed to enter simple mode\n");
	return -ENODEV;
}
/* free items allocated or mapped by hpsa_pci_init */
static void hpsa_free_pci_init(struct ctlr_info *h)
{
	hpsa_free_cfgtables(h);			/* pci_init 4 */
	iounmap(h->vaddr);			/* pci_init 3 */
	h->vaddr = NULL;
	hpsa_disable_interrupt_mode(h);		/* pci_init 2 */
	/*
	 * call pci_disable_device before pci_release_regions per
	 * Documentation/PCI/pci.txt
	 */
	pci_disable_device(h->pdev);		/* pci_init 1 */
	pci_release_regions(h->pdev);		/* pci_init 2 */
}
/* several items must be freed later */
static int hpsa_pci_init(struct ctlr_info *h)
{
	int prod_index, err;

	prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
	if (prod_index < 0)
		return prod_index;
	h->product_name = products[prod_index].product_name;
	h->access = *(products[prod_index].access);

	h->needs_abort_tags_swizzled =
		ctlr_needs_abort_tags_swizzled(h->board_id);

	pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
			       PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);

	err = pci_enable_device(h->pdev);
	if (err) {
		dev_err(&h->pdev->dev, "failed to enable PCI device\n");
		pci_disable_device(h->pdev);
		return err;
	}

	err = pci_request_regions(h->pdev, HPSA);
	if (err) {
		dev_err(&h->pdev->dev,
			"failed to obtain PCI resources\n");
		pci_disable_device(h->pdev);
		return err;
	}

	pci_set_master(h->pdev);

	hpsa_interrupt_mode(h);
	err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
	if (err)
		goto clean2;	/* intmode+region, pci */
	h->vaddr = remap_pci_mem(h->paddr, 0x250);
	if (!h->vaddr) {
		dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
		err = -ENOMEM;
		goto clean2;	/* intmode+region, pci */
	}
	err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
	if (err)
		goto clean3;	/* vaddr, intmode+region, pci */
	err = hpsa_find_cfgtables(h);
	if (err)
		goto clean3;	/* vaddr, intmode+region, pci */
	hpsa_find_board_params(h);

	if (!hpsa_CISS_signature_present(h)) {
		err = -ENODEV;
		goto clean4;	/* cfgtables, vaddr, intmode+region, pci */
	}
	hpsa_set_driver_support_bits(h);
	hpsa_p600_dma_prefetch_quirk(h);
	err = hpsa_enter_simple_mode(h);
	if (err)
		goto clean4;	/* cfgtables, vaddr, intmode+region, pci */
	return 0;

clean4:	/* cfgtables, vaddr, intmode+region, pci */
	hpsa_free_cfgtables(h);
clean3:	/* vaddr, intmode+region, pci */
	iounmap(h->vaddr);
	h->vaddr = NULL;
clean2:	/* intmode+region, pci */
	hpsa_disable_interrupt_mode(h);
	/*
	 * call pci_disable_device before pci_release_regions per
	 * Documentation/PCI/pci.txt
	 */
	pci_disable_device(h->pdev);
	pci_release_regions(h->pdev);
	return err;
}
static void hpsa_hba_inquiry(struct ctlr_info *h)
{
	int rc;

#define HBA_INQUIRY_BYTE_COUNT 64
	h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
	if (!h->hba_inquiry_data)
		return;
	rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
		h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
	if (rc != 0) {
		kfree(h->hba_inquiry_data);
		h->hba_inquiry_data = NULL;
	}
}
static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
{
	int rc, i;
	void __iomem *vaddr;

	if (!reset_devices)
		return 0;

	/* kdump kernel is loading, we don't know in which state is
	 * the pci interface. The dev->enable_cnt is equal zero
	 * so we call enable+disable, wait a while and switch it on.
	 */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_warn(&pdev->dev, "Failed to enable PCI device\n");
		return -ENODEV;
	}
	pci_disable_device(pdev);
	msleep(260);			/* a randomly chosen number */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_warn(&pdev->dev, "failed to enable device.\n");
		return -ENODEV;
	}

	pci_set_master(pdev);

	vaddr = pci_ioremap_bar(pdev, 0);
	if (vaddr == NULL) {
		rc = -ENOMEM;
		goto out_disable;
	}
	writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	iounmap(vaddr);

	/* Reset the controller with a PCI power-cycle or via doorbell */
	rc = hpsa_kdump_hard_reset_controller(pdev, board_id);

	/* -ENOTSUPP here means we cannot reset the controller
	 * but it's already (and still) up and running in
	 * "performant mode".  Or, it might be 640x, which can't reset
	 * due to concerns about shared bbwc between 6402/6404 pair.
	 */
	if (rc)
		goto out_disable;

	/* Now try to get the controller to respond to a no-op */
	dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
		if (hpsa_noop(pdev) == 0)
			break;
		else
			dev_warn(&pdev->dev, "no-op failed%s\n",
					(i < 11 ? "; re-trying" : ""));
	}

out_disable:

	pci_disable_device(pdev);
	return rc;
}
static void hpsa_free_cmd_pool(struct ctlr_info *h)
{
	kfree(h->cmd_pool_bits);
	h->cmd_pool_bits = NULL;
	if (h->cmd_pool) {
		pci_free_consistent(h->pdev,
				h->nr_cmds * sizeof(struct CommandList),
				h->cmd_pool,
				h->cmd_pool_dhandle);
		h->cmd_pool = NULL;
		h->cmd_pool_dhandle = 0;
	}
	if (h->errinfo_pool) {
		pci_free_consistent(h->pdev,
				h->nr_cmds * sizeof(struct ErrorInfo),
				h->errinfo_pool,
				h->errinfo_pool_dhandle);
		h->errinfo_pool = NULL;
		h->errinfo_pool_dhandle = 0;
	}
}
static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
{
	h->cmd_pool_bits = kzalloc(
		DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
		sizeof(unsigned long), GFP_KERNEL);
	h->cmd_pool = pci_alloc_consistent(h->pdev,
		    h->nr_cmds * sizeof(*h->cmd_pool),
		    &(h->cmd_pool_dhandle));
	h->errinfo_pool = pci_alloc_consistent(h->pdev,
		    h->nr_cmds * sizeof(*h->errinfo_pool),
		    &(h->errinfo_pool_dhandle));
	if ((h->cmd_pool_bits == NULL)
	    || (h->cmd_pool == NULL)
	    || (h->errinfo_pool == NULL)) {
		dev_err(&h->pdev->dev, "out of memory in %s", __func__);
		goto clean_up;
	}
	hpsa_preinitialize_commands(h);
	return 0;
clean_up:
	hpsa_free_cmd_pool(h);
	return -ENOMEM;
}
static void hpsa_irq_affinity_hints(struct ctlr_info *h)
{
	int i, cpu;

	cpu = cpumask_first(cpu_online_mask);
	for (i = 0; i < h->msix_vector; i++) {
		irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
		cpu = cpumask_next(cpu, cpu_online_mask);
	}
}
/* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
static void hpsa_free_irqs(struct ctlr_info *h)
{
	int i;

	if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
		/* Single reply queue, only one irq to free */
		i = h->intr_mode;
		irq_set_affinity_hint(h->intr[i], NULL);
		free_irq(h->intr[i], &h->q[i]);
		h->q[i] = 0;
		return;
	}

	for (i = 0; i < h->msix_vector; i++) {
		irq_set_affinity_hint(h->intr[i], NULL);
		free_irq(h->intr[i], &h->q[i]);
		h->q[i] = 0;
	}
	for (; i < MAX_REPLY_QUEUES; i++)
		h->q[i] = 0;
}
/* returns 0 on success; cleans up and returns -Enn on error */
static int hpsa_request_irqs(struct ctlr_info *h,
	irqreturn_t (*msixhandler)(int, void *),
	irqreturn_t (*intxhandler)(int, void *))
{
	int rc, i;

	/*
	 * initialize h->q[x] = x so that interrupt handlers know which
	 * queue to process.
	 */
	for (i = 0; i < MAX_REPLY_QUEUES; i++)
		h->q[i] = (u8) i;

	if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
		/* If performant mode and MSI-X, use multiple reply queues */
		for (i = 0; i < h->msix_vector; i++) {
			sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
			rc = request_irq(h->intr[i], msixhandler,
					0, h->intrname[i],
					&h->q[i]);
			if (rc) {
				int j;

				dev_err(&h->pdev->dev,
					"failed to get irq %d for %s\n",
				       h->intr[i], h->devname);
				for (j = 0; j < i; j++) {
					free_irq(h->intr[j], &h->q[j]);
					h->q[j] = 0;
				}
				for (; j < MAX_REPLY_QUEUES; j++)
					h->q[j] = 0;
				return rc;
			}
		}
		hpsa_irq_affinity_hints(h);
	} else {
		/* Use single reply pool */
		if (h->msix_vector > 0 || h->msi_vector) {
			if (h->msix_vector)
				sprintf(h->intrname[h->intr_mode],
					"%s-msix", h->devname);
			else
				sprintf(h->intrname[h->intr_mode],
					"%s-msi", h->devname);
			rc = request_irq(h->intr[h->intr_mode],
				msixhandler, 0,
				h->intrname[h->intr_mode],
				&h->q[h->intr_mode]);
		} else {
			sprintf(h->intrname[h->intr_mode],
				"%s-intx", h->devname);
			rc = request_irq(h->intr[h->intr_mode],
				intxhandler, IRQF_SHARED,
				h->intrname[h->intr_mode],
				&h->q[h->intr_mode]);
		}
		irq_set_affinity_hint(h->intr[h->intr_mode], NULL);
	}
	if (rc) {
		dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
		       h->intr[h->intr_mode], h->devname);
		hpsa_free_irqs(h);
		return -ENODEV;
	}
	return 0;
}
static int hpsa_kdump_soft_reset(struct ctlr_info *h)
{
	int rc;

	hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER);

	dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
	rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
	if (rc) {
		dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
		return rc;
	}

	dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
	rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
	if (rc) {
		dev_warn(&h->pdev->dev,
			"Board failed to become ready after soft reset.\n");
		return rc;
	}

	return 0;
}
static void hpsa_free_reply_queues(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->nreply_queues; i++) {
		if (!h->reply_queue[i].head)
			continue;
		pci_free_consistent(h->pdev,
					h->reply_queue_size,
					h->reply_queue[i].head,
					h->reply_queue[i].busaddr);
		h->reply_queue[i].head = NULL;
		h->reply_queue[i].busaddr = 0;
	}
	h->reply_queue_size = 0;
}
static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
{
	hpsa_free_performant_mode(h);		/* init_one 7 */
	hpsa_free_sg_chain_blocks(h);		/* init_one 6 */
	hpsa_free_cmd_pool(h);			/* init_one 5 */
	hpsa_free_irqs(h);			/* init_one 4 */
	scsi_host_put(h->scsi_host);		/* init_one 3 */
	h->scsi_host = NULL;			/* init_one 3 */
	hpsa_free_pci_init(h);			/* init_one 2_5 */
	free_percpu(h->lockup_detected);	/* init_one 2 */
	h->lockup_detected = NULL;		/* init_one 2 */
	if (h->resubmit_wq) {
		destroy_workqueue(h->resubmit_wq);	/* init_one 1 */
		h->resubmit_wq = NULL;
	}
	if (h->rescan_ctlr_wq) {
		destroy_workqueue(h->rescan_ctlr_wq);
		h->rescan_ctlr_wq = NULL;
	}
	kfree(h);				/* init_one 1 */
}
/* Called when controller lockup detected. */
static void fail_all_outstanding_cmds(struct ctlr_info *h)
{
	int i, refcount;
	struct CommandList *c;
	u32 failcount = 0;

	flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
	for (i = 0; i < h->nr_cmds; i++) {
		c = h->cmd_pool + i;
		refcount = atomic_inc_return(&c->refcount);
		if (refcount > 1) {
			c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
			finish_cmd(c);
			atomic_dec(&h->commands_outstanding);
			failcount++;
		}
		cmd_free(h, c);
	}
	dev_warn(&h->pdev->dev,
		"failed %d commands in fail_all\n", failcount);
}
static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
{
	int cpu;

	for_each_online_cpu(cpu) {
		u32 *lockup_detected;
		lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
		*lockup_detected = value;
	}
	wmb(); /* be sure the per-cpu variables are out to memory */
}
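/*
 * Added commentary: lockup_detected is per-cpu so the hot I/O
 * submission and completion paths can test it with a cheap local read
 * instead of bouncing a shared cache line between CPUs; this
 * writer-side loop is the slow path and runs only when the state
 * actually changes.
 */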
static void controller_lockup_detected(struct ctlr_info *h)
{
	unsigned long flags;
	u32 lockup_detected;

	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	spin_lock_irqsave(&h->lock, flags);
	lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
	if (!lockup_detected) {
		/* no heartbeat, but controller gave us a zero. */
		dev_warn(&h->pdev->dev,
			"lockup detected after %d but scratchpad register is zero\n",
			h->heartbeat_sample_interval / HZ);
		lockup_detected = 0xffffffff;
	}
	set_lockup_detected_for_all_cpus(h, lockup_detected);
	spin_unlock_irqrestore(&h->lock, flags);
	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d\n",
			lockup_detected, h->heartbeat_sample_interval / HZ);
	pci_disable_device(h->pdev);
	fail_all_outstanding_cmds(h);
}
static int detect_controller_lockup(struct ctlr_info *h)
{
	u64 now;
	u32 heartbeat;
	unsigned long flags;

	now = get_jiffies_64();
	/* If we've received an interrupt recently, we're ok. */
	if (time_after64(h->last_intr_timestamp +
				(h->heartbeat_sample_interval), now))
		return false;

	/*
	 * If we've already checked the heartbeat recently, we're ok.
	 * This could happen if someone sends us a signal. We
	 * otherwise don't care about signals in this thread.
	 */
	if (time_after64(h->last_heartbeat_timestamp +
				(h->heartbeat_sample_interval), now))
		return false;

	/* If heartbeat has not changed since we last looked, we're not ok. */
	spin_lock_irqsave(&h->lock, flags);
	heartbeat = readl(&h->cfgtable->HeartBeat);
	spin_unlock_irqrestore(&h->lock, flags);
	if (h->last_heartbeat == heartbeat) {
		controller_lockup_detected(h);
		return true;
	}

	/* We're ok. */
	h->last_heartbeat = heartbeat;
	h->last_heartbeat_timestamp = now;
	return false;
}
static void hpsa_ack_ctlr_events(struct ctlr_info *h)
{
	int i;
	char *event_type;

	if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
		return;

	/* Ask the controller to clear the events we're handling. */
	if ((h->transMethod & (CFGTBL_Trans_io_accel1
			| CFGTBL_Trans_io_accel2)) &&
		(h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
		 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {

		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
			event_type = "state change";
		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
			event_type = "configuration change";
		/* Stop sending new RAID offload reqs via the IO accelerator */
		scsi_block_requests(h->scsi_host);
		for (i = 0; i < h->ndevices; i++)
			h->dev[i]->offload_enabled = 0;
		hpsa_drain_accel_commands(h);
		/* Set 'accelerator path config change' bit */
		dev_warn(&h->pdev->dev,
			"Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
			h->events, event_type);
		writel(h->events, &(h->cfgtable->clear_event_notify));
		/* Set the "clear event notify field update" bit 6 */
		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
		/* Wait until ctlr clears 'clear event notify field', bit 6 */
		hpsa_wait_for_clear_event_notify_ack(h);
		scsi_unblock_requests(h->scsi_host);
	} else {
		/* Acknowledge controller notification events. */
		writel(h->events, &(h->cfgtable->clear_event_notify));
		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
		hpsa_wait_for_clear_event_notify_ack(h);
	}
	return;
}
/* Check a register on the controller to see if there are configuration
 * changes (added/changed/removed logical drives, etc.) which mean that
 * we should rescan the controller for devices.
 * Also check flag for driver-initiated rescan.
 */
static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
{
	if (h->drv_req_rescan) {
		h->drv_req_rescan = 0;
		return 1;
	}

	if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
		return 0;

	h->events = readl(&(h->cfgtable->event_notify));
	return h->events & RESCAN_REQUIRED_EVENT_BITS;
}
/*
 * Check if any of the offline devices have become ready
 */
static int hpsa_offline_devices_ready(struct ctlr_info *h)
{
	unsigned long flags;
	struct offline_device_entry *d;
	struct list_head *this, *tmp;

	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_safe(this, tmp, &h->offline_device_list) {
		d = list_entry(this, struct offline_device_entry,
				offline_list);
		spin_unlock_irqrestore(&h->offline_device_lock, flags);
		if (!hpsa_volume_offline(h, d->scsi3addr)) {
			spin_lock_irqsave(&h->offline_device_lock, flags);
			list_del(&d->offline_list);
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
			return 1;
		}
		spin_lock_irqsave(&h->offline_device_lock, flags);
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
	return 0;
}
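/*
 * Added commentary: the offline_device_lock is dropped around
 * hpsa_volume_offline() because that helper sends commands to the
 * controller and may sleep, which is not allowed under a spinlock; the
 * lock is re-acquired both to unlink a now-ready entry and before the
 * list walk continues.
 */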
static int hpsa_luns_changed(struct ctlr_info *h)
{
	int rc = 1; /* assume there are changes */
	struct ReportLUNdata *logdev = NULL;

	/* if we can't find out if lun data has changed,
	 * assume that it has.
	 */

	if (!h->lastlogicals)
		return rc;

	logdev = kzalloc(sizeof(*logdev), GFP_KERNEL);
	if (!logdev) {
		dev_warn(&h->pdev->dev,
			"Out of memory, can't track lun changes.\n");
		return rc;
	}
	if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) {
		dev_warn(&h->pdev->dev,
			"report luns failed, can't track lun changes.\n");
		goto out;
	}
	if (memcmp(logdev, h->lastlogicals, sizeof(*logdev))) {
		dev_info(&h->pdev->dev,
			"Lun changes detected.\n");
		memcpy(h->lastlogicals, logdev, sizeof(*logdev));
		goto out;
	} else
		rc = 0; /* no changes detected. */
out:
	kfree(logdev);
	return rc;
}
static void hpsa_rescan_ctlr_worker(struct work_struct *work)
{
	unsigned long flags;
	struct ctlr_info *h = container_of(to_delayed_work(work),
					struct ctlr_info, rescan_ctlr_work);

	if (h->remove_in_progress)
		return;

	if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
		scsi_host_get(h->scsi_host);
		hpsa_ack_ctlr_events(h);
		hpsa_scan_start(h->scsi_host);
		scsi_host_put(h->scsi_host);
	} else if (h->discovery_polling) {
		hpsa_disable_rld_caching(h);
		if (hpsa_luns_changed(h)) {
			struct Scsi_Host *sh = NULL;

			dev_info(&h->pdev->dev,
				"driver discovery polling rescan.\n");
			sh = scsi_host_get(h->scsi_host);
			if (sh != NULL) {
				hpsa_scan_start(sh);
				scsi_host_put(sh);
			}
		}
	}
	spin_lock_irqsave(&h->lock, flags);
	if (!h->remove_in_progress)
		queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
				h->heartbeat_sample_interval);
	spin_unlock_irqrestore(&h->lock, flags);
}
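/*
 * Added commentary: the rescan worker re-queues itself, so checks keep
 * happening every heartbeat_sample_interval until remove_in_progress is
 * set; taking h->lock around the re-queue closes the race with
 * hpsa_remove_one() setting that flag and cancelling the work.
 */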
static void hpsa_monitor_ctlr_worker(struct work_struct *work)
{
	unsigned long flags;
	struct ctlr_info *h = container_of(to_delayed_work(work),
					struct ctlr_info, monitor_ctlr_work);

	detect_controller_lockup(h);
	if (lockup_detected(h))
		return;

	spin_lock_irqsave(&h->lock, flags);
	if (!h->remove_in_progress)
		schedule_delayed_work(&h->monitor_ctlr_work,
				h->heartbeat_sample_interval);
	spin_unlock_irqrestore(&h->lock, flags);
}
static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
						char *name)
{
	struct workqueue_struct *wq = NULL;

	wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
	if (!wq)
		dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);

	return wq;
}
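/*
 * Added commentary: alloc_ordered_workqueue() gives each controller a
 * dedicated, strictly ordered queue that executes at most one work item
 * at a time, so rescan (or resubmit) items for one controller never run
 * concurrently with each other.
 */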
static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int dac, rc;
	struct ctlr_info *h;
	int try_soft_reset = 0;
	unsigned long flags;
	u32 board_id;

	if (number_of_controllers == 0)
		printk(KERN_INFO DRIVER_NAME "\n");

	rc = hpsa_lookup_board_id(pdev, &board_id);
	if (rc < 0) {
		dev_warn(&pdev->dev, "Board ID not found\n");
		return rc;
	}

	rc = hpsa_init_reset_devices(pdev, board_id);
	if (rc) {
		if (rc != -ENOTSUPP)
			return rc;
		/* If the reset fails in a particular way (it has no way to do
		 * a proper hard reset, so returns -ENOTSUPP) we can try to do
		 * a soft reset once we get the controller configured up to the
		 * point that it can accept a command.
		 */
		try_soft_reset = 1;
		rc = 0;
	}

reinit_after_soft_reset:

	/* Command structures must be aligned on a 32-byte boundary because
	 * the 5 lower bits of the address are used by the hardware and by
	 * the driver.  See comments in hpsa.h for more info.
	 */
	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h) {
		dev_err(&pdev->dev, "Failed to allocate controller head\n");
		return -ENOMEM;
	}

	h->pdev = pdev;

	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
	INIT_LIST_HEAD(&h->offline_device_list);
	spin_lock_init(&h->lock);
	spin_lock_init(&h->offline_device_lock);
	spin_lock_init(&h->scan_lock);
	atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
	atomic_set(&h->abort_cmds_available, HPSA_CMDS_RESERVED_FOR_ABORTS);

	/* Allocate and clear per-cpu variable lockup_detected */
	h->lockup_detected = alloc_percpu(u32);
	if (!h->lockup_detected) {
		dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
		rc = -ENOMEM;
		goto clean1;	/* aer/h */
	}
	set_lockup_detected_for_all_cpus(h, 0);

	rc = hpsa_pci_init(h);
	if (rc)
		goto clean2;	/* lu, aer/h */

	/* relies on h-> settings made by hpsa_pci_init, including
	 * interrupt_mode h->intr */
	rc = hpsa_scsi_host_alloc(h);
	if (rc)
		goto clean2_5;	/* pci, lu, aer/h */

	sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
	h->ctlr = number_of_controllers;
	number_of_controllers++;

	/* configure PCI DMA stuff */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc == 0) {
		dac = 1;
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc == 0) {
			dac = 0;
		} else {
			dev_err(&pdev->dev, "no suitable DMA available\n");
			goto clean3;	/* shost, pci, lu, aer/h */
		}
	}

	/* make sure the board interrupts are off */
	h->access.set_intr_mask(h, HPSA_INTR_OFF);

	rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
	if (rc)
		goto clean3;	/* shost, pci, lu, aer/h */
	rc = hpsa_alloc_cmd_pool(h);
	if (rc)
		goto clean4;	/* irq, shost, pci, lu, aer/h */
	rc = hpsa_alloc_sg_chain_blocks(h);
	if (rc)
		goto clean5;	/* cmd, irq, shost, pci, lu, aer/h */
	init_waitqueue_head(&h->scan_wait_queue);
	init_waitqueue_head(&h->abort_cmd_wait_queue);
	init_waitqueue_head(&h->event_sync_wait_queue);
	mutex_init(&h->reset_mutex);
	h->scan_finished = 1; /* no scan currently in progress */

	pci_set_drvdata(pdev, h);
	h->ndevices = 0;

	spin_lock_init(&h->devlock);
	rc = hpsa_put_ctlr_into_performant_mode(h);
	if (rc)
		goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */

	/* hook into SCSI subsystem */
	rc = hpsa_scsi_add_host(h);
	if (rc)
		goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */

	/* create the resubmit workqueue */
	h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
	if (!h->rescan_ctlr_wq) {
		rc = -ENOMEM;
		goto clean7;
	}

	h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
	if (!h->resubmit_wq) {
		rc = -ENOMEM;
		goto clean7;	/* aer/h */
	}

	/*
	 * At this point, the controller is ready to take commands.
	 * Now, if reset_devices and the hard reset didn't work, try
	 * the soft reset and see if that works.
	 */
	if (try_soft_reset) {

		/* This is kind of gross.  We may or may not get a completion
		 * from the soft reset command, and if we do, then the value
		 * from the fifo may or may not be valid.  So, we wait 10 secs
		 * after the reset throwing away any completions we get during
		 * that time.  Unregister the interrupt handler and register
		 * fake ones to scoop up any residual completions.
		 */
		spin_lock_irqsave(&h->lock, flags);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);
		spin_unlock_irqrestore(&h->lock, flags);
		hpsa_free_irqs(h);
		rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
					hpsa_intx_discard_completions);
		if (rc) {
			dev_warn(&h->pdev->dev,
				"Failed to request_irq after soft reset.\n");
			/*
			 * cannot goto clean7 or free_irqs will be called
			 * again. Instead, do its work
			 */
			hpsa_free_performant_mode(h);	/* clean7 */
			hpsa_free_sg_chain_blocks(h);	/* clean6 */
			hpsa_free_cmd_pool(h);		/* clean5 */
			/*
			 * skip hpsa_free_irqs(h) clean4 since that
			 * was just called before request_irqs failed
			 */
			goto clean3;
		}

		rc = hpsa_kdump_soft_reset(h);
		if (rc)
			/* Neither hard nor soft reset worked, we're hosed. */
			goto clean7;

		dev_info(&h->pdev->dev, "Board READY.\n");
		dev_info(&h->pdev->dev,
			"Waiting for stale completions to drain.\n");
		h->access.set_intr_mask(h, HPSA_INTR_ON);
		msleep(10000);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);

		rc = controller_reset_failed(h->cfgtable);
		if (rc)
			dev_info(&h->pdev->dev,
				"Soft reset appears to have failed.\n");

		/* since the controller's reset, we have to go back and re-init
		 * everything.  Easiest to just forget what we've done and do
		 * it all over again.
		 */
		hpsa_undo_allocations_after_kdump_soft_reset(h);
		try_soft_reset = 0;
		if (rc)
			/* don't goto clean, we already unallocated */
			return -ENODEV;

		goto reinit_after_soft_reset;
	}

	/* Enable Accelerated IO path at driver layer */
	h->acciopath_status = 1;
	/* Disable discovery polling.*/
	h->discovery_polling = 0;

	/* Turn the interrupts on so we can service requests */
	h->access.set_intr_mask(h, HPSA_INTR_ON);

	hpsa_hba_inquiry(h);

	h->lastlogicals = kzalloc(sizeof(*(h->lastlogicals)), GFP_KERNEL);
	if (!h->lastlogicals)
		dev_info(&h->pdev->dev,
			"Can't track change to report lun data\n");

	/* Monitor the controller for firmware lockups */
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
	INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
	schedule_delayed_work(&h->monitor_ctlr_work,
				h->heartbeat_sample_interval);
	INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
	queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
				h->heartbeat_sample_interval);
	return 0;

clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
	hpsa_free_performant_mode(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
clean6: /* sg, cmd, irq, pci, lockup, wq/aer/h */
	hpsa_free_sg_chain_blocks(h);
clean5: /* cmd, irq, shost, pci, lu, aer/h */
	hpsa_free_cmd_pool(h);
clean4: /* irq, shost, pci, lu, aer/h */
	hpsa_free_irqs(h);
clean3: /* shost, pci, lu, aer/h */
	scsi_host_put(h->scsi_host);
	h->scsi_host = NULL;
clean2_5: /* pci, lu, aer/h */
	hpsa_free_pci_init(h);
clean2: /* lu, aer/h */
	if (h->lockup_detected) {
		free_percpu(h->lockup_detected);
		h->lockup_detected = NULL;
	}
clean1:	/* wq/aer/h */
	if (h->resubmit_wq) {
		destroy_workqueue(h->resubmit_wq);
		h->resubmit_wq = NULL;
	}
	if (h->rescan_ctlr_wq) {
		destroy_workqueue(h->rescan_ctlr_wq);
		h->rescan_ctlr_wq = NULL;
	}
	kfree(h);
	return rc;
}
static void hpsa_flush_cache(struct ctlr_info *h)
{
	char *flush_buf;
	struct CommandList *c;
	int rc;

	if (unlikely(lockup_detected(h)))
		return;
	flush_buf = kzalloc(4, GFP_KERNEL);
	if (!flush_buf)
		return;

	c = cmd_alloc(h);

	if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD)) {
		goto out;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
					PCI_DMA_TODEVICE, NO_TIMEOUT);
	if (rc)
		goto out;
	if (c->err_info->CommandStatus != 0)
out:
		dev_warn(&h->pdev->dev,
			"error flushing cache on controller\n");
	cmd_free(h, c);
	kfree(flush_buf);
}
/* Make controller gather fresh report lun data each time we
 * send down a report luns request
 */
static void hpsa_disable_rld_caching(struct ctlr_info *h)
{
	u32 *options;
	struct CommandList *c;
	int rc;

	/* Don't bother trying to set diag options if locked up */
	if (unlikely(h->lockup_detected))
		return;

	options = kzalloc(sizeof(*options), GFP_KERNEL);
	if (!options) {
		dev_err(&h->pdev->dev,
			"Error: failed to disable rld caching, during alloc.\n");
		return;
	}

	c = cmd_alloc(h);

	/* first, get the current diag options settings */
	if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD))
		goto errout;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
		PCI_DMA_FROMDEVICE, NO_TIMEOUT);
	if ((rc != 0) || (c->err_info->CommandStatus != 0))
		goto errout;

	/* Now, set the bit for disabling the RLD caching */
	*options |= HPSA_DIAG_OPTS_DISABLE_RLD_CACHING;

	if (fill_cmd(c, BMIC_SET_DIAG_OPTIONS, h, options, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD))
		goto errout;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
		PCI_DMA_TODEVICE, NO_TIMEOUT);
	if ((rc != 0) || (c->err_info->CommandStatus != 0))
		goto errout;

	/* Now verify that it got set: */
	if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD))
		goto errout;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
		PCI_DMA_FROMDEVICE, NO_TIMEOUT);
	if ((rc != 0) || (c->err_info->CommandStatus != 0))
		goto errout;

	if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING)
		goto out;

errout:
	dev_err(&h->pdev->dev,
		"Error: failed to disable report lun data caching.\n");
out:
	cmd_free(h, c);
	kfree(options);
}
static void hpsa_shutdown(struct pci_dev *pdev)
{
	struct ctlr_info *h;

	h = pci_get_drvdata(pdev);
	/* Turn board interrupts off and send the flush cache command.
	 * sendcmd will turn off interrupts and send the flush,
	 * to write all data in the battery-backed cache to disk.
	 */
	hpsa_flush_cache(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	hpsa_free_irqs(h);			/* init_one 4 */
	hpsa_disable_interrupt_mode(h);		/* pci_init 2 */
}
static void hpsa_free_device_info(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->ndevices; i++) {
		kfree(h->dev[i]);
		h->dev[i] = NULL;
	}
}
static void hpsa_remove_one(struct pci_dev *pdev)
{
	struct ctlr_info *h;
	unsigned long flags;

	if (pci_get_drvdata(pdev) == NULL) {
		dev_err(&pdev->dev, "unable to remove device\n");
		return;
	}
	h = pci_get_drvdata(pdev);

	/* Get rid of any controller monitoring work items */
	spin_lock_irqsave(&h->lock, flags);
	h->remove_in_progress = 1;
	spin_unlock_irqrestore(&h->lock, flags);
	cancel_delayed_work_sync(&h->monitor_ctlr_work);
	cancel_delayed_work_sync(&h->rescan_ctlr_work);
	destroy_workqueue(h->rescan_ctlr_wq);
	destroy_workqueue(h->resubmit_wq);

	/*
	 * Call before disabling interrupts.
	 * scsi_remove_host can trigger I/O operations especially
	 * when multipath is enabled. There can be SYNCHRONIZE CACHE
	 * operations which cannot complete and will hang the system.
	 */
	if (h->scsi_host)
		scsi_remove_host(h->scsi_host);		/* init_one 8 */
	/* includes hpsa_free_irqs - init_one 4 */
	/* includes hpsa_disable_interrupt_mode - pci_init 2 */
	hpsa_shutdown(pdev);

	hpsa_free_device_info(h);		/* scan */

	kfree(h->hba_inquiry_data);			/* init_one 10 */
	h->hba_inquiry_data = NULL;			/* init_one 10 */
	hpsa_free_ioaccel2_sg_chain_blocks(h);
	hpsa_free_performant_mode(h);			/* init_one 7 */
	hpsa_free_sg_chain_blocks(h);			/* init_one 6 */
	hpsa_free_cmd_pool(h);				/* init_one 5 */
	kfree(h->lastlogicals);

	/* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */

	scsi_host_put(h->scsi_host);			/* init_one 3 */
	h->scsi_host = NULL;				/* init_one 3 */

	/* includes hpsa_disable_interrupt_mode - pci_init 2 */
	hpsa_free_pci_init(h);				/* init_one 2.5 */

	free_percpu(h->lockup_detected);		/* init_one 2 */
	h->lockup_detected = NULL;			/* init_one 2 */
	/* (void) pci_disable_pcie_error_reporting(pdev); */	/* init_one 1 */

	hpsa_delete_sas_host(h);

	kfree(h);					/* init_one 1 */
}
static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
	__attribute__((unused)) pm_message_t state)
{
	return -ENOSYS;
}

static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
{
	return -ENOSYS;
}
static struct pci_driver hpsa_pci_driver = {
	.name = HPSA,
	.probe = hpsa_init_one,
	.remove = hpsa_remove_one,
	.id_table = hpsa_pci_device_id,	/* id_table */
	.shutdown = hpsa_shutdown,
	.suspend = hpsa_suspend,
	.resume = hpsa_resume,
};
/* Fill in bucket_map[], given nsgs (the max number of
 * scatter gather elements supported) and bucket[],
 * which is an array of 8 integers.  The bucket[] array
 * contains 8 different DMA transfer sizes (in 16
 * byte increments) which the controller uses to fetch
 * commands.  This function fills in bucket_map[], which
 * maps a given number of scatter gather elements to one of
 * the 8 DMA transfer sizes.  The point of it is to allow the
 * controller to only do as much DMA as needed to fetch the
 * command, with the DMA transfer size encoded in the lower
 * bits of the command address.
 */
static void calc_bucket_map(int bucket[], int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map)
{
	int i, j, b, size;

	/* Note, bucket_map must have nsgs+1 entries. */
	for (i = 0; i <= nsgs; i++) {
		/* Compute size of a command with i SG entries */
		size = i + min_blocks;
		b = num_buckets; /* Assume the biggest bucket */
		/* Find the bucket that is just big enough */
		for (j = 0; j < num_buckets; j++) {
			if (bucket[j] >= size) {
				b = j;
				break;
			}
		}
		/* for a command with i SG entries, use bucket b. */
		bucket_map[i] = b;
	}
}
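/*
 * Added worked example: with the performant-mode bft[] of
 * {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4} and min_blocks = 4,
 * a command with i = 3 SG entries needs size = 3 + 4 = 7 blocks of 16
 * bytes; the first bucket >= 7 is bucket[2] = 8, so bucket_map[3] = 2
 * and the controller fetches 8 * 16 = 128 bytes for that command.
 */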
/*
 * return -ENODEV on err, 0 on success (or no action)
 * allocates numerous items that must be freed later
 */
static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
{
	int i;
	unsigned long register_value;
	unsigned long transMethod = CFGTBL_Trans_Performant |
			(trans_support & CFGTBL_Trans_use_short_tags) |
				CFGTBL_Trans_enable_directed_msix |
			(trans_support & (CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_io_accel2));
	struct access_method access = SA5_performant_access;

	/* This is a bit complicated.  There are 8 registers on
	 * the controller which we write to to tell it 8 different
	 * sizes of commands which there may be.  It's a way of
	 * reducing the DMA done to fetch each command.  Encoded into
	 * each command's tag are 3 bits which communicate to the controller
	 * which of the eight sizes that command fits within.  The size of
	 * each command depends on how many scatter gather entries there are.
	 * Each SG entry requires 16 bytes.  The eight registers are programmed
	 * with the number of 16-byte blocks a command of that size requires.
	 * The smallest command possible requires 5 such 16 byte blocks.
	 * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
	 * blocks.  Note, this only extends to the SG entries contained
	 * within the command block, and does not extend to chained blocks
	 * of SG elements.  bft[] contains the eight values we write to
	 * the registers.  They are not evenly distributed, but have more
	 * sizes for small commands, and fewer sizes for larger commands.
	 */
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
#define MIN_IOACCEL2_BFT_ENTRY 5
#define HPSA_IOACCEL2_HEADER_SZ 4
	int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
			13, 14, 15, 16, 17, 18, 19,
			HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
	BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
	BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
				 16 * MIN_IOACCEL2_BFT_ENTRY);
	BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
	/*  5 = 1 s/g entry or 4k
	 *  6 = 2 s/g entry or 8k
	 *  8 = 4 s/g entry or 16k
	 * 10 = 6 s/g entry or 24k
	 */

	/* If the controller supports either ioaccel method then
	 * we can also use the RAID stack submit path that does not
	 * perform the superfluous readl() after each command submission.
	 */
	if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
		access = SA5_performant_access_no_read;

	/* Controller spec: zero out this buffer. */
	for (i = 0; i < h->nreply_queues; i++)
		memset(h->reply_queue[i].head, 0, h->reply_queue_size);

	bft[7] = SG_ENTRIES_IN_CMD + 4;
	calc_bucket_map(bft, ARRAY_SIZE(bft),
				SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
	for (i = 0; i < 8; i++)
		writel(bft[i], &h->transtable->BlockFetch[i]);

	/* size of controller ring buffer */
	writel(h->max_commands, &h->transtable->RepQSize);
	writel(h->nreply_queues, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);

	for (i = 0; i < h->nreply_queues; i++) {
		writel(0, &h->transtable->RepQAddr[i].upper);
		writel(h->reply_queue[i].busaddr,
			&h->transtable->RepQAddr[i].lower);
	}

	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
	writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
	/*
	 * enable outbound interrupt coalescing in accelerator mode;
	 */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		access = SA5_ioaccel_mode1_access;
		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
	} else {
		if (trans_support & CFGTBL_Trans_io_accel2) {
			access = SA5_ioaccel_mode2_access;
			writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
			writel(4, &h->cfgtable->HostWrite.CoalIntCount);
		}
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	if (hpsa_wait_for_mode_change_ack(h)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - doorbell timeout\n");
		return -ENODEV;
	}
	register_value = readl(&(h->cfgtable->TransportActive));
	if (!(register_value & CFGTBL_Trans_Performant)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - transport not active\n");
		return -ENODEV;
	}
	/* Change the access methods to the performant access methods */
	h->access = access;
	h->transMethod = transMethod;

	if (!((trans_support & CFGTBL_Trans_io_accel1) ||
		(trans_support & CFGTBL_Trans_io_accel2)))
		return 0;

	if (trans_support & CFGTBL_Trans_io_accel1) {
		/* Set up I/O accelerator mode */
		for (i = 0; i < h->nreply_queues; i++) {
			writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
			h->reply_queue[i].current_entry =
				readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
		}
		bft[7] = h->ioaccel_maxsg + 8;
		calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
				h->ioaccel1_blockFetchTable);

		/* initialize all reply queue entries to unused */
		for (i = 0; i < h->nreply_queues; i++)
			memset(h->reply_queue[i].head,
				(u8) IOACCEL_MODE1_REPLY_UNUSED,
				h->reply_queue_size);

		/* set all the constant fields in the accelerator command
		 * frames once at init time to save CPU cycles later.
		 */
		for (i = 0; i < h->nr_cmds; i++) {
			struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];

			cp->function = IOACCEL1_FUNCTION_SCSIIO;
			cp->err_info = (u32) (h->errinfo_pool_dhandle +
					(i * sizeof(struct ErrorInfo)));
			cp->err_info_len = sizeof(struct ErrorInfo);
			cp->sgl_offset = IOACCEL1_SGLOFFSET;
			cp->host_context_flags =
				cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
			cp->timeout_sec = 0;
			cp->ReplyQueue = 0;
			cp->tag =
				cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
			cp->host_addr =
				cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
					(i * sizeof(struct io_accel1_cmd)));
		}
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		u64 cfg_offset, cfg_base_addr_index;
		u32 bft2_offset, cfg_base_addr;
		int rc;

		rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
			&cfg_base_addr_index, &cfg_offset);
		BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
		bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
		calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
				4, h->ioaccel2_blockFetchTable);
		bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
		BUILD_BUG_ON(offsetof(struct CfgTable,
				io_accel_request_size_offset) != 0xb8);
		h->ioaccel2_bft2_regs =
			remap_pci_mem(pci_resource_start(h->pdev,
					cfg_base_addr_index) +
					cfg_offset + bft2_offset,
					ARRAY_SIZE(bft2) *
					sizeof(*h->ioaccel2_bft2_regs));
		for (i = 0; i < ARRAY_SIZE(bft2); i++)
			writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	if (hpsa_wait_for_mode_change_ack(h)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - enabling ioaccel mode\n");
		return -ENODEV;
	}
	return 0;
}
/* Free ioaccel1 mode command blocks and block fetch table */
static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
{
	if (h->ioaccel_cmd_pool) {
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			h->ioaccel_cmd_pool,
			h->ioaccel_cmd_pool_dhandle);
		h->ioaccel_cmd_pool = NULL;
		h->ioaccel_cmd_pool_dhandle = 0;
	}
	kfree(h->ioaccel1_blockFetchTable);
	h->ioaccel1_blockFetchTable = NULL;
}
/* Allocate ioaccel1 mode command blocks and block fetch table */
static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
{
	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;

	/* Command structures must be aligned on a 128-byte boundary
	 * because the 7 lower bits of the address are used by the
	 * hardware.
	 */
	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
			IOACCEL1_COMMANDLIST_ALIGNMENT);
	h->ioaccel_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			&(h->ioaccel_cmd_pool_dhandle));

	h->ioaccel1_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel_cmd_pool == NULL) ||
		(h->ioaccel1_blockFetchTable == NULL))
		goto clean_up;

	memset(h->ioaccel_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
	return 0;

clean_up:
	hpsa_free_ioaccel1_cmd_and_bft(h);
	return -ENOMEM;
}
/* Free ioaccel2 mode command blocks and block fetch table */
static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
{
	hpsa_free_ioaccel2_sg_chain_blocks(h);

	if (h->ioaccel2_cmd_pool) {
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			h->ioaccel2_cmd_pool,
			h->ioaccel2_cmd_pool_dhandle);
		h->ioaccel2_cmd_pool = NULL;
		h->ioaccel2_cmd_pool_dhandle = 0;
	}
	kfree(h->ioaccel2_blockFetchTable);
	h->ioaccel2_blockFetchTable = NULL;
}
/* Allocate ioaccel2 mode command blocks and block fetch table */
static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
{
	int rc;

	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;

	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
			IOACCEL2_COMMANDLIST_ALIGNMENT);
	h->ioaccel2_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			&(h->ioaccel2_cmd_pool_dhandle));

	h->ioaccel2_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel2_cmd_pool == NULL) ||
		(h->ioaccel2_blockFetchTable == NULL)) {
		rc = -ENOMEM;
		goto clean_up;
	}

	rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
	if (rc)
		goto clean_up;

	memset(h->ioaccel2_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
	return 0;

clean_up:
	hpsa_free_ioaccel2_cmd_and_bft(h);
	return rc;
}
/* Free items allocated by hpsa_put_ctlr_into_performant_mode */
static void hpsa_free_performant_mode(struct ctlr_info *h)
{
	kfree(h->blockFetchTable);
	h->blockFetchTable = NULL;
	hpsa_free_reply_queues(h);
	hpsa_free_ioaccel1_cmd_and_bft(h);
	hpsa_free_ioaccel2_cmd_and_bft(h);
}
/* return -ENODEV on error, 0 on success (or no action)
 * allocates numerous items that must be freed later
 */
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;
	unsigned long transMethod = CFGTBL_Trans_Performant |
					CFGTBL_Trans_use_short_tags;
	int i, rc;

	if (hpsa_simple_mode)
		return 0;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return 0;

	/* Check for I/O accelerator mode support */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		transMethod |= CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_enable_directed_msix;
		rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
		if (rc)
			return rc;
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		transMethod |= CFGTBL_Trans_io_accel2 |
				CFGTBL_Trans_enable_directed_msix;
		rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
		if (rc)
			return rc;
	}

	h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
	hpsa_get_max_perf_mode_cmds(h);
	/* Performant mode ring buffer and supporting data structures */
	h->reply_queue_size = h->max_commands * sizeof(u64);

	for (i = 0; i < h->nreply_queues; i++) {
		h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
						h->reply_queue_size,
						&(h->reply_queue[i].busaddr));
		if (!h->reply_queue[i].head) {
			rc = -ENOMEM;
			goto clean1;	/* rq, ioaccel */
		}
		h->reply_queue[i].size = h->max_commands;
		h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */
		h->reply_queue[i].current_entry = 0;
	}

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
				sizeof(u32)), GFP_KERNEL);
	if (!h->blockFetchTable) {
		rc = -ENOMEM;
		goto clean1;	/* rq, ioaccel */
	}

	rc = hpsa_enter_performant_mode(h, trans_support);
	if (rc)
		goto clean2;	/* bft, rq, ioaccel */
	return 0;

clean2:	/* bft, rq, ioaccel */
	kfree(h->blockFetchTable);
	h->blockFetchTable = NULL;
clean1:	/* rq, ioaccel */
	hpsa_free_reply_queues(h);
	hpsa_free_ioaccel1_cmd_and_bft(h);
	hpsa_free_ioaccel2_cmd_and_bft(h);
	return rc;
}
static int is_accelerated_cmd(struct CommandList *c)
{
	return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
}

static void hpsa_drain_accel_commands(struct ctlr_info *h)
{
	struct CommandList *c = NULL;
	int i, accel_cmds_out;
	int refcount;

	do { /* wait for all outstanding ioaccel commands to drain out */
		accel_cmds_out = 0;
		for (i = 0; i < h->nr_cmds; i++) {
			c = h->cmd_pool + i;
			refcount = atomic_inc_return(&c->refcount);
			if (refcount > 1) /* Command is allocated */
				accel_cmds_out += is_accelerated_cmd(c);
			cmd_free(h, c);
		}
		if (accel_cmds_out <= 0)
			break;
		msleep(100);
	} while (1);
}
static struct hpsa_sas_phy *hpsa_alloc_sas_phy(
	struct hpsa_sas_port *hpsa_sas_port)
{
	struct hpsa_sas_phy *hpsa_sas_phy;
	struct sas_phy *phy;

	hpsa_sas_phy = kzalloc(sizeof(*hpsa_sas_phy), GFP_KERNEL);
	if (!hpsa_sas_phy)
		return NULL;

	phy = sas_phy_alloc(hpsa_sas_port->parent_node->parent_dev,
		hpsa_sas_port->next_phy_index);
	if (!phy) {
		kfree(hpsa_sas_phy);
		return NULL;
	}

	hpsa_sas_port->next_phy_index++;
	hpsa_sas_phy->phy = phy;
	hpsa_sas_phy->parent_port = hpsa_sas_port;

	return hpsa_sas_phy;
}
static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy)
{
	struct sas_phy *phy = hpsa_sas_phy->phy;

	sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy);
	sas_phy_free(phy);
	if (hpsa_sas_phy->added_to_port)
		list_del(&hpsa_sas_phy->phy_list_entry);
	kfree(hpsa_sas_phy);
}
static int hpsa_sas_port_add_phy(struct hpsa_sas_phy *hpsa_sas_phy)
{
	int rc;
	struct hpsa_sas_port *hpsa_sas_port;
	struct sas_phy *phy;
	struct sas_identify *identify;

	hpsa_sas_port = hpsa_sas_phy->parent_port;
	phy = hpsa_sas_phy->phy;

	identify = &phy->identify;
	memset(identify, 0, sizeof(*identify));
	identify->sas_address = hpsa_sas_port->sas_address;
	identify->device_type = SAS_END_DEVICE;
	identify->initiator_port_protocols = SAS_PROTOCOL_STP;
	identify->target_port_protocols = SAS_PROTOCOL_STP;
	phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
	phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
	phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
	phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
	phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;

	rc = sas_phy_add(hpsa_sas_phy->phy);
	if (rc)
		return rc;

	sas_port_add_phy(hpsa_sas_port->port, hpsa_sas_phy->phy);
	list_add_tail(&hpsa_sas_phy->phy_list_entry,
			&hpsa_sas_port->phy_list_head);
	hpsa_sas_phy->added_to_port = true;

	return 0;
}
static int
	hpsa_sas_port_add_rphy(struct hpsa_sas_port *hpsa_sas_port,
				struct sas_rphy *rphy)
{
	struct sas_identify *identify;

	identify = &rphy->identify;
	identify->sas_address = hpsa_sas_port->sas_address;
	identify->initiator_port_protocols = SAS_PROTOCOL_STP;
	identify->target_port_protocols = SAS_PROTOCOL_STP;

	return sas_rphy_add(rphy);
}
static struct hpsa_sas_port
	*hpsa_alloc_sas_port(struct hpsa_sas_node *hpsa_sas_node,
				u64 sas_address)
{
	int rc;
	struct hpsa_sas_port *hpsa_sas_port;
	struct sas_port *port;

	hpsa_sas_port = kzalloc(sizeof(*hpsa_sas_port), GFP_KERNEL);
	if (!hpsa_sas_port)
		return NULL;

	INIT_LIST_HEAD(&hpsa_sas_port->phy_list_head);
	hpsa_sas_port->parent_node = hpsa_sas_node;

	port = sas_port_alloc_num(hpsa_sas_node->parent_dev);
	if (!port)
		goto free_hpsa_port;

	rc = sas_port_add(port);
	if (rc)
		goto free_sas_port;

	hpsa_sas_port->port = port;
	hpsa_sas_port->sas_address = sas_address;
	list_add_tail(&hpsa_sas_port->port_list_entry,
			&hpsa_sas_node->port_list_head);

	return hpsa_sas_port;

free_sas_port:
	sas_port_free(port);
free_hpsa_port:
	kfree(hpsa_sas_port);

	return NULL;
}
static void hpsa_free_sas_port(struct hpsa_sas_port *hpsa_sas_port)
{
	struct hpsa_sas_phy *hpsa_sas_phy;
	struct hpsa_sas_phy *next;

	list_for_each_entry_safe(hpsa_sas_phy, next,
			&hpsa_sas_port->phy_list_head, phy_list_entry)
		hpsa_free_sas_phy(hpsa_sas_phy);

	sas_port_delete(hpsa_sas_port->port);
	list_del(&hpsa_sas_port->port_list_entry);
	kfree(hpsa_sas_port);
}
static struct hpsa_sas_node *hpsa_alloc_sas_node(struct device *parent_dev)
{
	struct hpsa_sas_node *hpsa_sas_node;

	hpsa_sas_node = kzalloc(sizeof(*hpsa_sas_node), GFP_KERNEL);
	if (hpsa_sas_node) {
		hpsa_sas_node->parent_dev = parent_dev;
		INIT_LIST_HEAD(&hpsa_sas_node->port_list_head);
	}

	return hpsa_sas_node;
}
static void hpsa_free_sas_node(struct hpsa_sas_node *hpsa_sas_node)
{
	struct hpsa_sas_port *hpsa_sas_port;
	struct hpsa_sas_port *next;

	if (!hpsa_sas_node)
		return;

	list_for_each_entry_safe(hpsa_sas_port, next,
			&hpsa_sas_node->port_list_head, port_list_entry)
		hpsa_free_sas_port(hpsa_sas_port);

	kfree(hpsa_sas_node);
}
static struct hpsa_scsi_dev_t
	*hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
					struct sas_rphy *rphy)
{
	int i;
	struct hpsa_scsi_dev_t *device;

	for (i = 0; i < h->ndevices; i++) {
		device = h->dev[i];
		if (!device->sas_port)
			continue;
		if (device->sas_port->rphy == rphy)
			return device;
	}

	return NULL;
}
static int hpsa_add_sas_host(struct ctlr_info *h)
{
	int rc;
	struct device *parent_dev;
	struct hpsa_sas_node *hpsa_sas_node;
	struct hpsa_sas_port *hpsa_sas_port;
	struct hpsa_sas_phy *hpsa_sas_phy;

	parent_dev = &h->scsi_host->shost_gendev;

	hpsa_sas_node = hpsa_alloc_sas_node(parent_dev);
	if (!hpsa_sas_node)
		return -ENOMEM;

	hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, h->sas_address);
	if (!hpsa_sas_port) {
		rc = -ENODEV;
		goto free_sas_node;
	}

	hpsa_sas_phy = hpsa_alloc_sas_phy(hpsa_sas_port);
	if (!hpsa_sas_phy) {
		rc = -ENODEV;
		goto free_sas_port;
	}

	rc = hpsa_sas_port_add_phy(hpsa_sas_phy);
	if (rc)
		goto free_sas_phy;

	h->sas_host = hpsa_sas_node;

	return 0;

free_sas_phy:
	hpsa_free_sas_phy(hpsa_sas_phy);
free_sas_port:
	hpsa_free_sas_port(hpsa_sas_port);
free_sas_node:
	hpsa_free_sas_node(hpsa_sas_node);

	return rc;
}
static void hpsa_delete_sas_host(struct ctlr_info *h)
{
	hpsa_free_sas_node(h->sas_host);
}
static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
				struct hpsa_scsi_dev_t *device)
{
	int rc;
	struct hpsa_sas_port *hpsa_sas_port;
	struct sas_rphy *rphy;

	hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, device->sas_address);
	if (!hpsa_sas_port)
		return -ENOMEM;

	rphy = sas_end_device_alloc(hpsa_sas_port->port);
	if (!rphy) {
		rc = -ENODEV;
		goto free_sas_port;
	}

	hpsa_sas_port->rphy = rphy;
	device->sas_port = hpsa_sas_port;

	rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy);
	if (rc)
		goto free_sas_port;

	return 0;

free_sas_port:
	hpsa_free_sas_port(hpsa_sas_port);
	device->sas_port = NULL;

	return rc;
}
static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device)
{
	if (device->sas_port) {
		hpsa_free_sas_port(device->sas_port);
		device->sas_port = NULL;
	}
}
static int
	hpsa_sas_get_linkerrors(struct sas_phy *phy)
{
	return 0;
}

static int
	hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
{
	return 0;
}

static int
	hpsa_sas_get_bay_identifier(struct sas_rphy *rphy)
{
	return -ENXIO;
}

static int
	hpsa_sas_phy_reset(struct sas_phy *phy, int hard_reset)
{
	return 0;
}

static int
	hpsa_sas_phy_enable(struct sas_phy *phy, int enable)
{
	return 0;
}

static int
	hpsa_sas_phy_setup(struct sas_phy *phy)
{
	return 0;
}

static void
	hpsa_sas_phy_release(struct sas_phy *phy)
{
}

static int
	hpsa_sas_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
{
	return -EINVAL;
}

/* SMP = Serial Management Protocol */
static int
	hpsa_sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
				struct request *req)
{
	return -EINVAL;
}
static struct sas_function_template hpsa_sas_transport_functions = {
	.get_linkerrors = hpsa_sas_get_linkerrors,
	.get_enclosure_identifier = hpsa_sas_get_enclosure_identifier,
	.get_bay_identifier = hpsa_sas_get_bay_identifier,
	.phy_reset = hpsa_sas_phy_reset,
	.phy_enable = hpsa_sas_phy_enable,
	.phy_setup = hpsa_sas_phy_setup,
	.phy_release = hpsa_sas_phy_release,
	.set_phy_speed = hpsa_sas_phy_speed,
	.smp_handler = hpsa_sas_smp_handler,
};
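/*
 * Added commentary: most of these transport callbacks are stubs because
 * the Smart Array firmware owns the physical topology and does not
 * expose phy control to the host; the template mainly exists so devices
 * can be represented as SAS end devices with rphys under /sys.
 */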
/*
 * This is it.  Register the PCI driver information for the cards we
 * control; the OS will call our registered routines when it finds one
 * of our cards.
 */
static int __init hpsa_init(void)
{
	int rc;

	hpsa_sas_transport_template =
		sas_attach_transport(&hpsa_sas_transport_functions);
	if (!hpsa_sas_transport_template)
		return -ENODEV;

	rc = pci_register_driver(&hpsa_pci_driver);

	if (rc)
		sas_release_transport(hpsa_sas_transport_template);

	return rc;
}

static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
	sas_release_transport(hpsa_sas_transport_template);
}
static void __attribute__((unused)) verify_offsets(void)
{
#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)

	VERIFY_OFFSET(structure_size, 0);
	VERIFY_OFFSET(volume_blk_size, 4);
	VERIFY_OFFSET(volume_blk_cnt, 8);
	VERIFY_OFFSET(phys_blk_shift, 16);
	VERIFY_OFFSET(parity_rotation_shift, 17);
	VERIFY_OFFSET(strip_size, 18);
	VERIFY_OFFSET(disk_starting_blk, 20);
	VERIFY_OFFSET(disk_blk_cnt, 28);
	VERIFY_OFFSET(data_disks_per_row, 36);
	VERIFY_OFFSET(metadata_disks_per_row, 38);
	VERIFY_OFFSET(row_cnt, 40);
	VERIFY_OFFSET(layout_map_count, 42);
	VERIFY_OFFSET(flags, 44);
	VERIFY_OFFSET(dekindex, 46);
	/* VERIFY_OFFSET(reserved, 48 */
	VERIFY_OFFSET(data, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)

	VERIFY_OFFSET(IU_type, 0);
	VERIFY_OFFSET(direction, 1);
	VERIFY_OFFSET(reply_queue, 2);
	/* VERIFY_OFFSET(reserved1, 3); */
	VERIFY_OFFSET(scsi_nexus, 4);
	VERIFY_OFFSET(Tag, 8);
	VERIFY_OFFSET(cdb, 16);
	VERIFY_OFFSET(cciss_lun, 32);
	VERIFY_OFFSET(data_len, 40);
	VERIFY_OFFSET(cmd_priority_task_attr, 44);
	VERIFY_OFFSET(sg_count, 45);
	/* VERIFY_OFFSET(reserved3 */
	VERIFY_OFFSET(err_ptr, 48);
	VERIFY_OFFSET(err_len, 56);
	/* VERIFY_OFFSET(reserved4 */
	VERIFY_OFFSET(sg, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)

	VERIFY_OFFSET(dev_handle, 0x00);
	VERIFY_OFFSET(reserved1, 0x02);
	VERIFY_OFFSET(function, 0x03);
	VERIFY_OFFSET(reserved2, 0x04);
	VERIFY_OFFSET(err_info, 0x0C);
	VERIFY_OFFSET(reserved3, 0x10);
	VERIFY_OFFSET(err_info_len, 0x12);
	VERIFY_OFFSET(reserved4, 0x13);
	VERIFY_OFFSET(sgl_offset, 0x14);
	VERIFY_OFFSET(reserved5, 0x15);
	VERIFY_OFFSET(transfer_len, 0x1C);
	VERIFY_OFFSET(reserved6, 0x20);
	VERIFY_OFFSET(io_flags, 0x24);
	VERIFY_OFFSET(reserved7, 0x26);
	VERIFY_OFFSET(LUN, 0x34);
	VERIFY_OFFSET(control, 0x3C);
	VERIFY_OFFSET(CDB, 0x40);
	VERIFY_OFFSET(reserved8, 0x50);
	VERIFY_OFFSET(host_context_flags, 0x60);
	VERIFY_OFFSET(timeout_sec, 0x62);
	VERIFY_OFFSET(ReplyQueue, 0x64);
	VERIFY_OFFSET(reserved9, 0x65);
	VERIFY_OFFSET(tag, 0x68);
	VERIFY_OFFSET(host_addr, 0x70);
	VERIFY_OFFSET(CISS_LUN, 0x78);
	VERIFY_OFFSET(SG, 0x78 + 8);
#undef VERIFY_OFFSET
}
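/*
 * Added commentary: VERIFY_OFFSET expands to
 * BUILD_BUG_ON(offsetof(...) != offset), so every check above is
 * evaluated at compile time; the function is never called (hence the
 * unused attribute) and exists only to break the build if a
 * firmware-visible structure layout drifts.
 */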
module_init(hpsa_init);
module_exit(hpsa_cleanup);