1 /*
2 * ipr.c -- driver for IBM Power Linux RAID adapters
3 *
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5 *
6 * Copyright (C) 2003, 2004 IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24 /*
25 * Notes:
26 *
27 * This driver is used to control the following SCSI adapters:
28 *
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30 *
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
35 *
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
43 * - Hot spare
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
47 * by adding disks
48 *
49 * Driver Features:
50 * - Tagged command queuing
51 * - Adapter microcode download
52 * - PCI hot plug
53 * - SCSI device hot plug
54 *
55 */
56
57 #include <linux/fs.h>
58 #include <linux/init.h>
59 #include <linux/types.h>
60 #include <linux/errno.h>
61 #include <linux/kernel.h>
62 #include <linux/slab.h>
63 #include <linux/vmalloc.h>
64 #include <linux/ioport.h>
65 #include <linux/delay.h>
66 #include <linux/pci.h>
67 #include <linux/wait.h>
68 #include <linux/spinlock.h>
69 #include <linux/sched.h>
70 #include <linux/interrupt.h>
71 #include <linux/blkdev.h>
72 #include <linux/firmware.h>
73 #include <linux/module.h>
74 #include <linux/moduleparam.h>
75 #include <linux/libata.h>
76 #include <linux/hdreg.h>
77 #include <linux/reboot.h>
78 #include <linux/stringify.h>
79 #include <asm/io.h>
80 #include <asm/irq.h>
81 #include <asm/processor.h>
82 #include <scsi/scsi.h>
83 #include <scsi/scsi_host.h>
84 #include <scsi/scsi_tcq.h>
85 #include <scsi/scsi_eh.h>
86 #include <scsi/scsi_cmnd.h>
87 #include "ipr.h"
88
89 /*
90 * Global Data
91 */
92 static LIST_HEAD(ipr_ioa_head);
93 static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
94 static unsigned int ipr_max_speed = 1;
95 static int ipr_testmode = 0;
96 static unsigned int ipr_fastfail = 0;
97 static unsigned int ipr_transop_timeout = 0;
98 static unsigned int ipr_debug = 0;
99 static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
100 static unsigned int ipr_dual_ioa_raid = 1;
101 static unsigned int ipr_number_of_msix = 2;
102 static DEFINE_SPINLOCK(ipr_driver_lock);
103
104 /* This table describes the differences between DMA controller chips */
105 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
106 { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
107 .mailbox = 0x0042C,
108 .max_cmds = 100,
109 .cache_line_size = 0x20,
110 .clear_isr = 1,
111 .iopoll_weight = 0,
112 {
113 .set_interrupt_mask_reg = 0x0022C,
114 .clr_interrupt_mask_reg = 0x00230,
115 .clr_interrupt_mask_reg32 = 0x00230,
116 .sense_interrupt_mask_reg = 0x0022C,
117 .sense_interrupt_mask_reg32 = 0x0022C,
118 .clr_interrupt_reg = 0x00228,
119 .clr_interrupt_reg32 = 0x00228,
120 .sense_interrupt_reg = 0x00224,
121 .sense_interrupt_reg32 = 0x00224,
122 .ioarrin_reg = 0x00404,
123 .sense_uproc_interrupt_reg = 0x00214,
124 .sense_uproc_interrupt_reg32 = 0x00214,
125 .set_uproc_interrupt_reg = 0x00214,
126 .set_uproc_interrupt_reg32 = 0x00214,
127 .clr_uproc_interrupt_reg = 0x00218,
128 .clr_uproc_interrupt_reg32 = 0x00218
129 }
130 },
131 { /* Snipe and Scamp */
132 .mailbox = 0x0052C,
133 .max_cmds = 100,
134 .cache_line_size = 0x20,
135 .clear_isr = 1,
136 .iopoll_weight = 0,
137 {
138 .set_interrupt_mask_reg = 0x00288,
139 .clr_interrupt_mask_reg = 0x0028C,
140 .clr_interrupt_mask_reg32 = 0x0028C,
141 .sense_interrupt_mask_reg = 0x00288,
142 .sense_interrupt_mask_reg32 = 0x00288,
143 .clr_interrupt_reg = 0x00284,
144 .clr_interrupt_reg32 = 0x00284,
145 .sense_interrupt_reg = 0x00280,
146 .sense_interrupt_reg32 = 0x00280,
147 .ioarrin_reg = 0x00504,
148 .sense_uproc_interrupt_reg = 0x00290,
149 .sense_uproc_interrupt_reg32 = 0x00290,
150 .set_uproc_interrupt_reg = 0x00290,
151 .set_uproc_interrupt_reg32 = 0x00290,
152 .clr_uproc_interrupt_reg = 0x00294,
153 .clr_uproc_interrupt_reg32 = 0x00294
154 }
155 },
156 { /* CRoC */
157 .mailbox = 0x00044,
158 .max_cmds = 1000,
159 .cache_line_size = 0x20,
160 .clear_isr = 0,
161 .iopoll_weight = 64,
162 {
163 .set_interrupt_mask_reg = 0x00010,
164 .clr_interrupt_mask_reg = 0x00018,
165 .clr_interrupt_mask_reg32 = 0x0001C,
166 .sense_interrupt_mask_reg = 0x00010,
167 .sense_interrupt_mask_reg32 = 0x00014,
168 .clr_interrupt_reg = 0x00008,
169 .clr_interrupt_reg32 = 0x0000C,
170 .sense_interrupt_reg = 0x00000,
171 .sense_interrupt_reg32 = 0x00004,
172 .ioarrin_reg = 0x00070,
173 .sense_uproc_interrupt_reg = 0x00020,
174 .sense_uproc_interrupt_reg32 = 0x00024,
175 .set_uproc_interrupt_reg = 0x00020,
176 .set_uproc_interrupt_reg32 = 0x00024,
177 .clr_uproc_interrupt_reg = 0x00028,
178 .clr_uproc_interrupt_reg32 = 0x0002C,
179 .init_feedback_reg = 0x0005C,
180 .dump_addr_reg = 0x00064,
181 .dump_data_reg = 0x00068,
182 .endian_swap_reg = 0x00084
183 }
184 },
185 };
186
187 static const struct ipr_chip_t ipr_chip[] = {
188 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
189 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
190 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
191 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
192 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
193 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
194 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
195 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
196 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
197 };
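/*
 * Editorial sketch, not part of the driver: ipr_chip[] above is meant to
 * be scanned linearly by PCI vendor/device ID at probe time. Assuming
 * struct ipr_chip_t keeps the vendor/device members declared in ipr.h, a
 * minimal lookup could look like this:
 */
static const struct ipr_chip_t * __maybe_unused
ipr_example_find_chip(u16 vendor, u16 device)
{
	int i;

	/* first match wins; table order encodes no priority beyond that */
	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == vendor &&
		    ipr_chip[i].device == device)
			return &ipr_chip[i];
	return NULL;	/* unknown adapter: caller should fail the probe */
}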
198
199 static int ipr_max_bus_speeds[] = {
200 IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
201 };
202
203 MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
204 MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
205 module_param_named(max_speed, ipr_max_speed, uint, 0);
206 MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
207 module_param_named(log_level, ipr_log_level, uint, 0);
208 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
209 module_param_named(testmode, ipr_testmode, int, 0);
210 MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
211 module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
212 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
213 module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
214 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
215 module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
216 MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
217 module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
218 MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
219 module_param_named(max_devs, ipr_max_devs, int, 0);
220 MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
221 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
222 module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
223 MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1-5). (default: 2)");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(IPR_DRIVER_VERSION);
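/*
 * Example usage (editorial): the parameters above are given at load time,
 * e.g.
 *
 *	modprobe ipr max_speed=2 fastfail=1 number_of_msix=4
 *
 * fastfail and debug are declared S_IRUGO | S_IWUSR, so they can also be
 * changed at runtime through /sys/module/ipr/parameters/.
 */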
226
227 /* A constant array of IOASCs/URCs/Error Messages */
228 static const
229 struct ipr_error_table_t ipr_error_table[] = {
230 {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
231 "8155: An unknown error was received"},
232 {0x00330000, 0, 0,
233 "Soft underlength error"},
234 {0x005A0000, 0, 0,
235 "Command to be cancelled not found"},
236 {0x00808000, 0, 0,
237 "Qualified success"},
238 {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
239 "FFFE: Soft device bus error recovered by the IOA"},
240 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
241 "4101: Soft device bus fabric error"},
242 {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
243 "FFFC: Logical block guard error recovered by the device"},
244 {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
245 "FFFC: Logical block reference tag error recovered by the device"},
246 {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
247 "4171: Recovered scatter list tag / sequence number error"},
248 {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
249 "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
250 {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
251 "4171: Recovered logical block sequence number error on IOA to Host transfer"},
252 {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
253 "FFFD: Recovered logical block reference tag error detected by the IOA"},
254 {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
255 "FFFD: Logical block guard error recovered by the IOA"},
256 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
257 "FFF9: Device sector reassign successful"},
258 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
259 "FFF7: Media error recovered by device rewrite procedures"},
260 {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
261 "7001: IOA sector reassignment successful"},
262 {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
263 "FFF9: Soft media error. Sector reassignment recommended"},
264 {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
265 "FFF7: Media error recovered by IOA rewrite procedures"},
266 {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
267 "FF3D: Soft PCI bus error recovered by the IOA"},
268 {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
269 "FFF6: Device hardware error recovered by the IOA"},
270 {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
271 "FFF6: Device hardware error recovered by the device"},
272 {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
273 "FF3D: Soft IOA error recovered by the IOA"},
274 {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
275 "FFFA: Undefined device response recovered by the IOA"},
276 {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
277 "FFF6: Device bus error, message or command phase"},
278 {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
279 "FFFE: Task Management Function failed"},
280 {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
281 "FFF6: Failure prediction threshold exceeded"},
282 {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
283 "8009: Impending cache battery pack failure"},
284 {0x02040400, 0, 0,
285 "34FF: Disk device format in progress"},
286 {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
287 "9070: IOA requested reset"},
288 {0x023F0000, 0, 0,
289 "Synchronization required"},
290 {0x024E0000, 0, 0,
291 "No ready, IOA shutdown"},
292 {0x025A0000, 0, 0,
293 "Not ready, IOA has been shutdown"},
294 {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
295 "3020: Storage subsystem configuration error"},
296 {0x03110B00, 0, 0,
297 "FFF5: Medium error, data unreadable, recommend reassign"},
298 {0x03110C00, 0, 0,
299 "7000: Medium error, data unreadable, do not reassign"},
300 {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
301 "FFF3: Disk media format bad"},
302 {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
303 "3002: Addressed device failed to respond to selection"},
304 {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
305 "3100: Device bus error"},
306 {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
307 "3109: IOA timed out a device command"},
308 {0x04088000, 0, 0,
309 "3120: SCSI bus is not operational"},
310 {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
311 "4100: Hard device bus fabric error"},
312 {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
313 "310C: Logical block guard error detected by the device"},
314 {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
315 "310C: Logical block reference tag error detected by the device"},
316 {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
317 "4170: Scatter list tag / sequence number error"},
318 {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
319 "8150: Logical block CRC error on IOA to Host transfer"},
320 {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
321 "4170: Logical block sequence number error on IOA to Host transfer"},
322 {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
323 "310D: Logical block reference tag error detected by the IOA"},
324 {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
325 "310D: Logical block guard error detected by the IOA"},
326 {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
327 "9000: IOA reserved area data check"},
328 {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
329 "9001: IOA reserved area invalid data pattern"},
330 {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
331 "9002: IOA reserved area LRC error"},
332 {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
333 "Hardware Error, IOA metadata access error"},
334 {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
335 "102E: Out of alternate sectors for disk storage"},
336 {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
337 "FFF4: Data transfer underlength error"},
338 {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
339 "FFF4: Data transfer overlength error"},
340 {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
341 "3400: Logical unit failure"},
342 {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
343 "FFF4: Device microcode is corrupt"},
344 {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
345 "8150: PCI bus error"},
346 {0x04430000, 1, 0,
347 "Unsupported device bus message received"},
348 {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
349 "FFF4: Disk device problem"},
350 {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
351 "8150: Permanent IOA failure"},
352 {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
353 "3010: Disk device returned wrong response to IOA"},
354 {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
355 "8151: IOA microcode error"},
356 {0x04448500, 0, 0,
357 "Device bus status error"},
358 {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
359 "8157: IOA error requiring IOA reset to recover"},
360 {0x04448700, 0, 0,
361 "ATA device status error"},
362 {0x04490000, 0, 0,
363 "Message reject received from the device"},
364 {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
365 "8008: A permanent cache battery pack failure occurred"},
366 {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
367 "9090: Disk unit has been modified after the last known status"},
368 {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
369 "9081: IOA detected device error"},
370 {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
371 "9082: IOA detected device error"},
372 {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
373 "3110: Device bus error, message or command phase"},
374 {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
375 "3110: SAS Command / Task Management Function failed"},
376 {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
377 "9091: Incorrect hardware configuration change has been detected"},
378 {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
379 "9073: Invalid multi-adapter configuration"},
380 {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
381 "4010: Incorrect connection between cascaded expanders"},
382 {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
383 "4020: Connections exceed IOA design limits"},
384 {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
385 "4030: Incorrect multipath connection"},
386 {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
387 "4110: Unsupported enclosure function"},
388 {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
389 "FFF4: Command to logical unit failed"},
390 {0x05240000, 1, 0,
391 "Illegal request, invalid request type or request packet"},
392 {0x05250000, 0, 0,
393 "Illegal request, invalid resource handle"},
394 {0x05258000, 0, 0,
395 "Illegal request, commands not allowed to this device"},
396 {0x05258100, 0, 0,
397 "Illegal request, command not allowed to a secondary adapter"},
398 {0x05258200, 0, 0,
399 "Illegal request, command not allowed to a non-optimized resource"},
400 {0x05260000, 0, 0,
401 "Illegal request, invalid field in parameter list"},
402 {0x05260100, 0, 0,
403 "Illegal request, parameter not supported"},
404 {0x05260200, 0, 0,
405 "Illegal request, parameter value invalid"},
406 {0x052C0000, 0, 0,
407 "Illegal request, command sequence error"},
408 {0x052C8000, 1, 0,
409 "Illegal request, dual adapter support not enabled"},
410 {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
411 "9031: Array protection temporarily suspended, protection resuming"},
412 {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
413 "9040: Array protection temporarily suspended, protection resuming"},
414 {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
415 "3140: Device bus not ready to ready transition"},
416 {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
417 "FFFB: SCSI bus was reset"},
418 {0x06290500, 0, 0,
419 "FFFE: SCSI bus transition to single ended"},
420 {0x06290600, 0, 0,
421 "FFFE: SCSI bus transition to LVD"},
422 {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
423 "FFFB: SCSI bus was reset by another initiator"},
424 {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
425 "3029: A device replacement has occurred"},
426 {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
427 "9051: IOA cache data exists for a missing or failed device"},
428 {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
429 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
430 {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
431 "9025: Disk unit is not supported at its physical location"},
432 {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
433 "3020: IOA detected a SCSI bus configuration error"},
434 {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
435 "3150: SCSI bus configuration error"},
436 {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
437 "9074: Asymmetric advanced function disk configuration"},
438 {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
439 "4040: Incomplete multipath connection between IOA and enclosure"},
440 {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
441 "4041: Incomplete multipath connection between enclosure and device"},
442 {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
443 "9075: Incomplete multipath connection between IOA and remote IOA"},
444 {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
445 "9076: Configuration error, missing remote IOA"},
446 {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
447 "4050: Enclosure does not support a required multipath function"},
448 {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
449 "4070: Logically bad block written on device"},
450 {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
451 "9041: Array protection temporarily suspended"},
452 {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
453 "9042: Corrupt array parity detected on specified device"},
454 {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
455 "9030: Array no longer protected due to missing or failed disk unit"},
456 {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
457 "9071: Link operational transition"},
458 {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
459 "9072: Link not operational transition"},
460 {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
461 "9032: Array exposed but still protected"},
462 {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
463 "70DD: Device forced failed by disrupt device command"},
464 {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
465 "4061: Multipath redundancy level got better"},
466 {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
467 "4060: Multipath redundancy level got worse"},
468 {0x07270000, 0, 0,
469 "Failure due to other device"},
470 {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
471 "9008: IOA does not support functions expected by devices"},
472 {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
473 "9010: Cache data associated with attached devices cannot be found"},
474 {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
475 "9011: Cache data belongs to devices other than those attached"},
476 {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
477 "9020: Array missing 2 or more devices with only 1 device present"},
478 {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
479 "9021: Array missing 2 or more devices with 2 or more devices present"},
480 {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
481 "9022: Exposed array is missing a required device"},
482 {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
483 "9023: Array member(s) not at required physical locations"},
484 {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
485 "9024: Array not functional due to present hardware configuration"},
486 {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
487 "9026: Array not functional due to present hardware configuration"},
488 {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
489 "9027: Array is missing a device and parity is out of sync"},
490 {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
491 "9028: Maximum number of arrays already exist"},
492 {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
493 "9050: Required cache data cannot be located for a disk unit"},
494 {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
495 "9052: Cache data exists for a device that has been modified"},
496 {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
497 "9054: IOA resources not available due to previous problems"},
498 {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
499 "9092: Disk unit requires initialization before use"},
500 {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
501 "9029: Incorrect hardware configuration change has been detected"},
502 {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
503 "9060: One or more disk pairs are missing from an array"},
504 {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
505 "9061: One or more disks are missing from an array"},
506 {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
507 "9062: One or more disks are missing from an array"},
508 {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
509 "9063: Maximum number of functional arrays has been exceeded"},
510 {0x0B260000, 0, 0,
511 "Aborted command, invalid descriptor"},
512 {0x0B5A0000, 0, 0,
513 "Command terminated by host"}
514 };
515
516 static const struct ipr_ses_table_entry ipr_ses_table[] = {
517 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
518 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
519 { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
520 { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
521 { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
522 { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
523 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
524 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
525 { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
526 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
527 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
528 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
529 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
530 };
531
532 /*
533 * Function Prototypes
534 */
535 static int ipr_reset_alert(struct ipr_cmnd *);
536 static void ipr_process_ccn(struct ipr_cmnd *);
537 static void ipr_process_error(struct ipr_cmnd *);
538 static void ipr_reset_ioa_job(struct ipr_cmnd *);
539 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
540 enum ipr_shutdown_type);
541
542 #ifdef CONFIG_SCSI_IPR_TRACE
543 /**
544 * ipr_trc_hook - Add a trace entry to the driver trace
545 * @ipr_cmd: ipr command struct
546 * @type: trace type
547 * @add_data: additional data
548 *
549 * Return value:
550 * none
551 **/
552 static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
553 u8 type, u32 add_data)
554 {
555 struct ipr_trace_entry *trace_entry;
556 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
557
558 trace_entry = &ioa_cfg->trace[atomic_add_return(1,
559 &ioa_cfg->trace_index) % IPR_NUM_TRACE_ENTRIES];
560 trace_entry->time = jiffies;
561 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
562 trace_entry->type = type;
563 if (ipr_cmd->ioa_cfg->sis64)
564 trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
565 else
566 trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
567 trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
568 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
569 trace_entry->u.add_data = add_data;
570 wmb();
571 }
572 #else
573 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
574 #endif
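/*
 * Editorial note on ipr_trc_hook() above: atomic_add_return() hands every
 * caller a unique, monotonically increasing index, and the modulo by
 * IPR_NUM_TRACE_ENTRIES turns ioa_cfg->trace into a lock-free circular
 * buffer. Assuming IPR_NUM_TRACE_ENTRIES is, say, 8192, two CPUs tracing
 * concurrently get indices N and N + 1 and therefore distinct slots, and
 * slot numbering wraps back to 0 once the buffer fills.
 */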
575
576 /**
577 * ipr_lock_and_done - Acquire lock and complete command
578 * @ipr_cmd: ipr command struct
579 *
580 * Return value:
581 * none
582 **/
583 static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
584 {
585 unsigned long lock_flags;
586 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
587
588 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
589 ipr_cmd->done(ipr_cmd);
590 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
591 }
592
593 /**
594 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
595 * @ipr_cmd: ipr command struct
596 *
597 * Return value:
598 * none
599 **/
600 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
601 {
602 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
603 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
604 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
605 dma_addr_t dma_addr = ipr_cmd->dma_addr;
606 int hrrq_id;
607
608 hrrq_id = ioarcb->cmd_pkt.hrrq_id;
609 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
610 ioarcb->cmd_pkt.hrrq_id = hrrq_id;
611 ioarcb->data_transfer_length = 0;
612 ioarcb->read_data_transfer_length = 0;
613 ioarcb->ioadl_len = 0;
614 ioarcb->read_ioadl_len = 0;
615
616 if (ipr_cmd->ioa_cfg->sis64) {
617 ioarcb->u.sis64_addr_data.data_ioadl_addr =
618 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
619 ioasa64->u.gata.status = 0;
620 } else {
621 ioarcb->write_ioadl_addr =
622 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
623 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
624 ioasa->u.gata.status = 0;
625 }
626
627 ioasa->hdr.ioasc = 0;
628 ioasa->hdr.residual_data_len = 0;
629 ipr_cmd->scsi_cmd = NULL;
630 ipr_cmd->qc = NULL;
631 ipr_cmd->sense_buffer[0] = 0;
632 ipr_cmd->dma_use_sg = 0;
633 }
634
635 /**
636 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
637 * @ipr_cmd: ipr command struct
 * @fast_done: fast done function call back
638 *
639 * Return value:
640 * none
641 **/
642 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
643 void (*fast_done) (struct ipr_cmnd *))
644 {
645 ipr_reinit_ipr_cmnd(ipr_cmd);
646 ipr_cmd->u.scratch = 0;
647 ipr_cmd->sibling = NULL;
648 ipr_cmd->fast_done = fast_done;
649 init_timer(&ipr_cmd->timer);
650 }
651
652 /**
653 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
654 * @hrrq: hrr queue struct
655 *
656 * Return value:
657 * pointer to ipr command struct
658 **/
659 static
660 struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
661 {
662 struct ipr_cmnd *ipr_cmd = NULL;
663
664 if (likely(!list_empty(&hrrq->hrrq_free_q))) {
665 ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
666 struct ipr_cmnd, queue);
667 list_del(&ipr_cmd->queue);
668 }
669
670
671 return ipr_cmd;
672 }
673
674 /**
675 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
676 * @ioa_cfg: ioa config struct
677 *
678 * Return value:
679 * pointer to ipr command struct
680 **/
681 static
682 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
683 {
684 struct ipr_cmnd *ipr_cmd =
685 __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
686 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
687 return ipr_cmd;
688 }
689
690 /**
691 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
692 * @ioa_cfg: ioa config struct
693 * @clr_ints: interrupts to clear
694 *
695 * This function masks all interrupts on the adapter, then clears the
696 * interrupts specified in the mask
697 *
698 * Return value:
699 * none
700 **/
701 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
702 u32 clr_ints)
703 {
704 volatile u32 int_reg;
705 int i;
706
707 /* Stop new interrupts */
708 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
709 spin_lock(&ioa_cfg->hrrq[i]._lock);
710 ioa_cfg->hrrq[i].allow_interrupts = 0;
711 spin_unlock(&ioa_cfg->hrrq[i]._lock);
712 }
713 wmb();
714
715 /* Set interrupt mask to stop all new interrupts */
716 if (ioa_cfg->sis64)
717 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
718 else
719 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
720
721 /* Clear any pending interrupts */
722 if (ioa_cfg->sis64)
723 writel(~0, ioa_cfg->regs.clr_interrupt_reg);
724 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
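/* Editorial note: the read back below likely flushes the posted MMIO writes */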
725 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
726 }
727
728 /**
729 * ipr_save_pcix_cmd_reg - Save PCI-X command register
730 * @ioa_cfg: ioa config struct
731 *
732 * Return value:
733 * 0 on success / -EIO on failure
734 **/
735 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
736 {
737 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
738
739 if (pcix_cmd_reg == 0)
740 return 0;
741
742 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
743 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
744 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
745 return -EIO;
746 }
747
748 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
749 return 0;
750 }
751
752 /**
753 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
754 * @ioa_cfg: ioa config struct
755 *
756 * Return value:
757 * 0 on success / -EIO on failure
758 **/
759 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
760 {
761 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
762
763 if (pcix_cmd_reg) {
764 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
765 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
766 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
767 return -EIO;
768 }
769 }
770
771 return 0;
772 }
773
774 /**
775 * ipr_sata_eh_done - done function for aborted SATA commands
776 * @ipr_cmd: ipr command struct
777 *
778 * This function is invoked for ops generated to SATA
779 * devices which are being aborted.
780 *
781 * Return value:
782 * none
783 **/
784 static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
785 {
786 struct ata_queued_cmd *qc = ipr_cmd->qc;
787 struct ipr_sata_port *sata_port = qc->ap->private_data;
788
789 qc->err_mask |= AC_ERR_OTHER;
790 sata_port->ioasa.status |= ATA_BUSY;
791 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
792 ata_qc_complete(qc);
793 }
794
795 /**
796 * ipr_scsi_eh_done - mid-layer done function for aborted ops
797 * @ipr_cmd: ipr command struct
798 *
799 * This function is invoked by the interrupt handler for
800 * ops generated by the SCSI mid-layer which are being aborted.
801 *
802 * Return value:
803 * none
804 **/
805 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
806 {
807 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
808
809 scsi_cmd->result |= (DID_ERROR << 16);
810
811 scsi_dma_unmap(ipr_cmd->scsi_cmd);
812 scsi_cmd->scsi_done(scsi_cmd);
813 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
814 }
815
816 /**
817 * ipr_fail_all_ops - Fails all outstanding ops.
818 * @ioa_cfg: ioa config struct
819 *
820 * This function fails all outstanding ops.
821 *
822 * Return value:
823 * none
824 **/
825 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
826 {
827 struct ipr_cmnd *ipr_cmd, *temp;
828 struct ipr_hrr_queue *hrrq;
829
830 ENTER;
831 for_each_hrrq(hrrq, ioa_cfg) {
832 spin_lock(&hrrq->_lock);
833 list_for_each_entry_safe(ipr_cmd,
834 temp, &hrrq->hrrq_pending_q, queue) {
835 list_del(&ipr_cmd->queue);
836
837 ipr_cmd->s.ioasa.hdr.ioasc =
838 cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
839 ipr_cmd->s.ioasa.hdr.ilid =
840 cpu_to_be32(IPR_DRIVER_ILID);
841
842 if (ipr_cmd->scsi_cmd)
843 ipr_cmd->done = ipr_scsi_eh_done;
844 else if (ipr_cmd->qc)
845 ipr_cmd->done = ipr_sata_eh_done;
846
847 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
848 IPR_IOASC_IOA_WAS_RESET);
849 del_timer(&ipr_cmd->timer);
850 ipr_cmd->done(ipr_cmd);
851 }
852 spin_unlock(&hrrq->_lock);
853 }
854 LEAVE;
855 }
856
857 /**
858 * ipr_send_command - Send driver initiated requests.
859 * @ipr_cmd: ipr command struct
860 *
861 * This function sends a command to the adapter using the correct write call.
862 * In the case of sis64, calculate the ioarcb size required. Then or in the
863 * appropriate bits.
864 *
865 * Return value:
866 * none
867 **/
868 static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
869 {
870 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
871 dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
872
873 if (ioa_cfg->sis64) {
874 /* The default size is 256 bytes */
875 send_dma_addr |= 0x1;
876
877 /* If the number of ioadls * size of ioadl > 128 bytes,
878 then use a 512 byte ioarcb */
879 if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
880 send_dma_addr |= 0x4;
881 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
882 } else
883 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
884 }
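/*
 * Worked example for the sis64 path above (editorial): assuming
 * sizeof(struct ipr_ioadl64_desc) is 16 bytes, a command with 8
 * scatter/gather entries carries 8 * 16 = 128 bytes of IOADLs, which still
 * fits the default 256-byte IOARCB, so only bit 0 is ORed into the DMA
 * address. At 9 entries (144 bytes) the test fires, bit 2 is set as well,
 * and the adapter fetches a 512-byte IOARCB instead.
 */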
885
886 /**
887 * ipr_do_req - Send driver initiated requests.
888 * @ipr_cmd: ipr command struct
889 * @done: done function
890 * @timeout_func: timeout function
891 * @timeout: timeout value
892 *
893 * This function sends the specified command to the adapter with the
894 * timeout given. The done function is invoked on command completion.
895 *
896 * Return value:
897 * none
898 **/
899 static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
900 void (*done) (struct ipr_cmnd *),
901 void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
902 {
903 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
904
905 ipr_cmd->done = done;
906
907 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
908 ipr_cmd->timer.expires = jiffies + timeout;
909 ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
910
911 add_timer(&ipr_cmd->timer);
912
913 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
914
915 ipr_send_command(ipr_cmd);
916 }
917
918 /**
919 * ipr_internal_cmd_done - Op done function for an internally generated op.
920 * @ipr_cmd: ipr command struct
921 *
922 * This function is the op done function for an internally generated,
923 * blocking op. It simply wakes the sleeping thread.
924 *
925 * Return value:
926 * none
927 **/
928 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
929 {
930 if (ipr_cmd->sibling)
931 ipr_cmd->sibling = NULL;
932 else
933 complete(&ipr_cmd->completion);
934 }
935
936 /**
937 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
938 * @ipr_cmd: ipr command struct
939 * @dma_addr: dma address
940 * @len: transfer length
941 * @flags: ioadl flag value
942 *
943 * This function initializes an ioadl in the case where there is only a single
944 * descriptor.
945 *
946 * Return value:
947 * nothing
948 **/
949 static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
950 u32 len, int flags)
951 {
952 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
953 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
954
955 ipr_cmd->dma_use_sg = 1;
956
957 if (ipr_cmd->ioa_cfg->sis64) {
958 ioadl64->flags = cpu_to_be32(flags);
959 ioadl64->data_len = cpu_to_be32(len);
960 ioadl64->address = cpu_to_be64(dma_addr);
961
962 ipr_cmd->ioarcb.ioadl_len =
963 cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
964 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
965 } else {
966 ioadl->flags_and_data_len = cpu_to_be32(flags | len);
967 ioadl->address = cpu_to_be32(dma_addr);
968
969 if (flags == IPR_IOADL_FLAGS_READ_LAST) {
970 ipr_cmd->ioarcb.read_ioadl_len =
971 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
972 ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
973 } else {
974 ipr_cmd->ioarcb.ioadl_len =
975 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
976 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
977 }
978 }
979 }
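/*
 * Usage sketch (editorial): for a single-buffer transfer such as an HCAM,
 * the caller maps one DMA region and describes it with exactly one
 * descriptor:
 *
 *	ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
 *		       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
 *
 * which is how ipr_send_hcam() below uses it.
 */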
980
981 /**
982 * ipr_send_blocking_cmd - Send command and sleep on its completion.
983 * @ipr_cmd: ipr command struct
984 * @timeout_func: function to invoke if command times out
985 * @timeout: timeout
986 *
987 * Return value:
988 * none
989 **/
990 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
991 void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
992 u32 timeout)
993 {
994 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
995
996 init_completion(&ipr_cmd->completion);
997 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
998
999 spin_unlock_irq(ioa_cfg->host->host_lock);
1000 wait_for_completion(&ipr_cmd->completion);
1001 spin_lock_irq(ioa_cfg->host->host_lock);
1002 }
1003
1004 static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1005 {
1006 if (ioa_cfg->hrrq_num == 1)
1007 return 0;
1008 else
1009 return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
1010 }
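/*
 * Worked example (editorial): with hrrq_num == 4, ipr_get_hrrq_index()
 * returns 1, 2, 3, 1, 2, 3, ... on successive calls, spreading I/O across
 * the last three queues while hrrq[0] (IPR_INIT_HRRQ) stays reserved for
 * internally generated commands; with a single queue it always returns 0.
 */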
1011
1012 /**
1013 * ipr_send_hcam - Send an HCAM to the adapter.
1014 * @ioa_cfg: ioa config struct
1015 * @type: HCAM type
1016 * @hostrcb: hostrcb struct
1017 *
1018 * This function will send a Host Controlled Async command to the adapter.
1019 * If HCAMs are currently not allowed to be issued to the adapter, it will
1020 * place the hostrcb on the free queue.
1021 *
1022 * Return value:
1023 * none
1024 **/
1025 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1026 struct ipr_hostrcb *hostrcb)
1027 {
1028 struct ipr_cmnd *ipr_cmd;
1029 struct ipr_ioarcb *ioarcb;
1030
1031 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1032 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
1033 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1034 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1035
1036 ipr_cmd->u.hostrcb = hostrcb;
1037 ioarcb = &ipr_cmd->ioarcb;
1038
1039 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1040 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
1041 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
1042 ioarcb->cmd_pkt.cdb[1] = type;
1043 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
1044 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
1045
1046 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
1047 sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
1048
1049 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
1050 ipr_cmd->done = ipr_process_ccn;
1051 else
1052 ipr_cmd->done = ipr_process_error;
1053
1054 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
1055
1056 ipr_send_command(ipr_cmd);
1057 } else {
1058 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1059 }
1060 }
1061
1062 /**
1063 * ipr_update_ata_class - Update the ata class in the resource entry
1064 * @res: resource entry struct
1065 * @proto: cfgte device bus protocol value
1066 *
1067 * Return value:
1068 * none
1069 **/
1070 static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
1071 {
1072 switch (proto) {
1073 case IPR_PROTO_SATA:
1074 case IPR_PROTO_SAS_STP:
1075 res->ata_class = ATA_DEV_ATA;
1076 break;
1077 case IPR_PROTO_SATA_ATAPI:
1078 case IPR_PROTO_SAS_STP_ATAPI:
1079 res->ata_class = ATA_DEV_ATAPI;
1080 break;
1081 default:
1082 res->ata_class = ATA_DEV_UNKNOWN;
1083 break;
1084 }
1085 }
1086
1087 /**
1088 * ipr_init_res_entry - Initialize a resource entry struct.
1089 * @res: resource entry struct
1090 * @cfgtew: config table entry wrapper struct
1091 *
1092 * Return value:
1093 * none
1094 **/
1095 static void ipr_init_res_entry(struct ipr_resource_entry *res,
1096 struct ipr_config_table_entry_wrapper *cfgtew)
1097 {
1098 int found = 0;
1099 unsigned int proto;
1100 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1101 struct ipr_resource_entry *gscsi_res = NULL;
1102
1103 res->needs_sync_complete = 0;
1104 res->in_erp = 0;
1105 res->add_to_ml = 0;
1106 res->del_from_ml = 0;
1107 res->resetting_device = 0;
1108 res->sdev = NULL;
1109 res->sata_port = NULL;
1110
1111 if (ioa_cfg->sis64) {
1112 proto = cfgtew->u.cfgte64->proto;
1113 res->res_flags = cfgtew->u.cfgte64->res_flags;
1114 res->qmodel = IPR_QUEUEING_MODEL64(res);
1115 res->type = cfgtew->u.cfgte64->res_type;
1116
1117 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1118 sizeof(res->res_path));
1119
1120 res->bus = 0;
1121 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1122 sizeof(res->dev_lun.scsi_lun));
1123 res->lun = scsilun_to_int(&res->dev_lun);
1124
1125 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1126 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1127 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1128 found = 1;
1129 res->target = gscsi_res->target;
1130 break;
1131 }
1132 }
1133 if (!found) {
1134 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1135 ioa_cfg->max_devs_supported);
1136 set_bit(res->target, ioa_cfg->target_ids);
1137 }
1138 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1139 res->bus = IPR_IOAFP_VIRTUAL_BUS;
1140 res->target = 0;
1141 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1142 res->bus = IPR_ARRAY_VIRTUAL_BUS;
1143 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1144 ioa_cfg->max_devs_supported);
1145 set_bit(res->target, ioa_cfg->array_ids);
1146 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1147 res->bus = IPR_VSET_VIRTUAL_BUS;
1148 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1149 ioa_cfg->max_devs_supported);
1150 set_bit(res->target, ioa_cfg->vset_ids);
1151 } else {
1152 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1153 ioa_cfg->max_devs_supported);
1154 set_bit(res->target, ioa_cfg->target_ids);
1155 }
1156 } else {
1157 proto = cfgtew->u.cfgte->proto;
1158 res->qmodel = IPR_QUEUEING_MODEL(res);
1159 res->flags = cfgtew->u.cfgte->flags;
1160 if (res->flags & IPR_IS_IOA_RESOURCE)
1161 res->type = IPR_RES_TYPE_IOAFP;
1162 else
1163 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1164
1165 res->bus = cfgtew->u.cfgte->res_addr.bus;
1166 res->target = cfgtew->u.cfgte->res_addr.target;
1167 res->lun = cfgtew->u.cfgte->res_addr.lun;
1168 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
1169 }
1170
1171 ipr_update_ata_class(res, proto);
1172 }
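/*
 * Editorial sketch of the sis64 target-id allocation used above: each
 * virtual bus owns a bitmap sized by max_devs_supported, a new resource
 * claims the first free bit, and ipr_clear_res_target() below releases it
 * when the device disappears:
 *
 *	id = find_first_zero_bit(ioa_cfg->vset_ids,
 *				 ioa_cfg->max_devs_supported);
 *	set_bit(id, ioa_cfg->vset_ids);		allocate
 *	clear_bit(id, ioa_cfg->vset_ids);	free
 */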
1173
1174 /**
1175 * ipr_is_same_device - Determine if two devices are the same.
1176 * @res: resource entry struct
1177 * @cfgtew: config table entry wrapper struct
1178 *
1179 * Return value:
1180 * 1 if the devices are the same / 0 otherwise
1181 **/
1182 static int ipr_is_same_device(struct ipr_resource_entry *res,
1183 struct ipr_config_table_entry_wrapper *cfgtew)
1184 {
1185 if (res->ioa_cfg->sis64) {
1186 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1187 sizeof(cfgtew->u.cfgte64->dev_id)) &&
1188 !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1189 sizeof(cfgtew->u.cfgte64->lun))) {
1190 return 1;
1191 }
1192 } else {
1193 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1194 res->target == cfgtew->u.cfgte->res_addr.target &&
1195 res->lun == cfgtew->u.cfgte->res_addr.lun)
1196 return 1;
1197 }
1198
1199 return 0;
1200 }
1201
1202 /**
1203 * __ipr_format_res_path - Format the resource path for printing.
1204 * @res_path: resource path
1205 * @buffer: buffer
1206 * @len: length of buffer provided
1207 *
1208 * Return value:
1209 * pointer to buffer
1210 **/
1211 static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
1212 {
1213 int i;
1214 char *p = buffer;
1215
1216 *p = '\0';
1217 p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1218 for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1219 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
1220
1221 return buffer;
1222 }
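/*
 * Worked example (editorial): a res_path of { 0x00, 0x02, 0xff, ... } is
 * rendered as "00-02"; the 0xff element acts as the terminator, and every
 * element after the first consumes three characters of the buffer ("-%02X").
 */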
1223
1224 /**
1225 * ipr_format_res_path - Format the resource path for printing.
1226 * @ioa_cfg: ioa config struct
1227 * @res_path: resource path
1228 * @buffer: buffer
1229 * @len: length of buffer provided
1230 *
1231 * Return value:
1232 * pointer to buffer
1233 **/
1234 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1235 u8 *res_path, char *buffer, int len)
1236 {
1237 char *p = buffer;
1238
1239 *p = '\0';
1240 p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1241 __ipr_format_res_path(res_path, p, len - (p - buffer));
1242 return buffer;
1243 }
1244
1245 /**
1246 * ipr_update_res_entry - Update the resource entry.
1247 * @res: resource entry struct
1248 * @cfgtew: config table entry wrapper struct
1249 *
1250 * Return value:
1251 * none
1252 **/
1253 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1254 struct ipr_config_table_entry_wrapper *cfgtew)
1255 {
1256 char buffer[IPR_MAX_RES_PATH_LENGTH];
1257 unsigned int proto;
1258 int new_path = 0;
1259
1260 if (res->ioa_cfg->sis64) {
1261 res->flags = cfgtew->u.cfgte64->flags;
1262 res->res_flags = cfgtew->u.cfgte64->res_flags;
1263 res->type = cfgtew->u.cfgte64->res_type;
1264
1265 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1266 sizeof(struct ipr_std_inq_data));
1267
1268 res->qmodel = IPR_QUEUEING_MODEL64(res);
1269 proto = cfgtew->u.cfgte64->proto;
1270 res->res_handle = cfgtew->u.cfgte64->res_handle;
1271 res->dev_id = cfgtew->u.cfgte64->dev_id;
1272
1273 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1274 sizeof(res->dev_lun.scsi_lun));
1275
1276 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1277 sizeof(res->res_path))) {
1278 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1279 sizeof(res->res_path));
1280 new_path = 1;
1281 }
1282
1283 if (res->sdev && new_path)
1284 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1285 ipr_format_res_path(res->ioa_cfg,
1286 res->res_path, buffer, sizeof(buffer)));
1287 } else {
1288 res->flags = cfgtew->u.cfgte->flags;
1289 if (res->flags & IPR_IS_IOA_RESOURCE)
1290 res->type = IPR_RES_TYPE_IOAFP;
1291 else
1292 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1293
1294 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1295 sizeof(struct ipr_std_inq_data));
1296
1297 res->qmodel = IPR_QUEUEING_MODEL(res);
1298 proto = cfgtew->u.cfgte->proto;
1299 res->res_handle = cfgtew->u.cfgte->res_handle;
1300 }
1301
1302 ipr_update_ata_class(res, proto);
1303 }
1304
1305 /**
1306 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1307 * for the resource.
1308 * @res: resource entry struct
1310 *
1311 * Return value:
1312 * none
1313 **/
1314 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1315 {
1316 struct ipr_resource_entry *gscsi_res = NULL;
1317 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1318
1319 if (!ioa_cfg->sis64)
1320 return;
1321
1322 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1323 clear_bit(res->target, ioa_cfg->array_ids);
1324 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1325 clear_bit(res->target, ioa_cfg->vset_ids);
1326 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1327 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1328 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1329 return;
1330 clear_bit(res->target, ioa_cfg->target_ids);
1331
1332 } else if (res->bus == 0)
1333 clear_bit(res->target, ioa_cfg->target_ids);
1334 }
1335
1336 /**
1337 * ipr_handle_config_change - Handle a config change from the adapter
1338 * @ioa_cfg: ioa config struct
1339 * @hostrcb: hostrcb
1340 *
1341 * Return value:
1342 * none
1343 **/
1344 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1345 struct ipr_hostrcb *hostrcb)
1346 {
1347 struct ipr_resource_entry *res = NULL;
1348 struct ipr_config_table_entry_wrapper cfgtew;
1349 __be32 cc_res_handle;
1350
1351 u32 is_ndn = 1;
1352
1353 if (ioa_cfg->sis64) {
1354 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1355 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1356 } else {
1357 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1358 cc_res_handle = cfgtew.u.cfgte->res_handle;
1359 }
1360
1361 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1362 if (res->res_handle == cc_res_handle) {
1363 is_ndn = 0;
1364 break;
1365 }
1366 }
1367
1368 if (is_ndn) {
1369 if (list_empty(&ioa_cfg->free_res_q)) {
1370 ipr_send_hcam(ioa_cfg,
1371 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1372 hostrcb);
1373 return;
1374 }
1375
1376 res = list_entry(ioa_cfg->free_res_q.next,
1377 struct ipr_resource_entry, queue);
1378
1379 list_del(&res->queue);
1380 ipr_init_res_entry(res, &cfgtew);
1381 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1382 }
1383
1384 ipr_update_res_entry(res, &cfgtew);
1385
1386 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1387 if (res->sdev) {
1388 res->del_from_ml = 1;
1389 res->res_handle = IPR_INVALID_RES_HANDLE;
1390 if (ioa_cfg->allow_ml_add_del)
1391 schedule_work(&ioa_cfg->work_q);
1392 } else {
1393 ipr_clear_res_target(res);
1394 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1395 }
1396 } else if (!res->sdev || res->del_from_ml) {
1397 res->add_to_ml = 1;
1398 if (ioa_cfg->allow_ml_add_del)
1399 schedule_work(&ioa_cfg->work_q);
1400 }
1401
1402 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1403 }
1404
1405 /**
1406 * ipr_process_ccn - Op done function for a CCN.
1407 * @ipr_cmd: ipr command struct
1408 *
1409 * This function is the op done function for a configuration
1410 * change notification host controlled async from the adapter.
1411 *
1412 * Return value:
1413 * none
1414 **/
1415 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1416 {
1417 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1418 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1419 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1420
1421 list_del(&hostrcb->queue);
1422 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1423
1424 if (ioasc) {
1425 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1426 dev_err(&ioa_cfg->pdev->dev,
1427 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1428
1429 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1430 } else {
1431 ipr_handle_config_change(ioa_cfg, hostrcb);
1432 }
1433 }
1434
1435 /**
1436 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1437 * @i: index into buffer
1438 * @buf: string to modify
1439 *
1440 * This function will strip all trailing whitespace, pad the end
1441 * of the string with a single space, and NULL terminate the string.
1442 *
1443 * Return value:
1444 * new length of string
1445 **/
1446 static int strip_and_pad_whitespace(int i, char *buf)
1447 {
1448 while (i && buf[i] == ' ')
1449 i--;
1450 buf[i+1] = ' ';
1451 buf[i+2] = '\0';
1452 return i + 2;
1453 }
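/*
 * Worked example (editorial): called with buf = "IBM     " and i = 7 (the
 * last index of the field), the loop backs up to the 'M' at index 2, a
 * single space is written at index 3, the terminator at index 4, and the
 * new length 4 is returned, leaving "IBM " ready for the next field to be
 * appended.
 */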
1454
1455 /**
1456 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1457 * @prefix: string to print at start of printk
1458 * @hostrcb: hostrcb pointer
1459 * @vpd: vendor/product id/sn struct
1460 *
1461 * Return value:
1462 * none
1463 **/
1464 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1465 struct ipr_vpd *vpd)
1466 {
1467 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1468 int i = 0;
1469
1470 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1471 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1472
1473 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1474 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1475
1476 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1477 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1478
1479 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1480 }
1481
1482 /**
1483 * ipr_log_vpd - Log the passed VPD to the error log.
1484 * @vpd: vendor/product id/sn struct
1485 *
1486 * Return value:
1487 * none
1488 **/
1489 static void ipr_log_vpd(struct ipr_vpd *vpd)
1490 {
1491 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1492 + IPR_SERIAL_NUM_LEN];
1493
1494 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1495 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1496 IPR_PROD_ID_LEN);
1497 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1498 ipr_err("Vendor/Product ID: %s\n", buffer);
1499
1500 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1501 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1502 ipr_err(" Serial Number: %s\n", buffer);
1503 }
1504
1505 /**
1506 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1507 * @prefix: string to print at start of printk
1508 * @hostrcb: hostrcb pointer
1509 * @vpd: vendor/product id/sn/wwn struct
1510 *
1511 * Return value:
1512 * none
1513 **/
1514 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1515 struct ipr_ext_vpd *vpd)
1516 {
1517 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1518 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1519 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1520 }
1521
1522 /**
1523 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1524 * @vpd: vendor/product id/sn/wwn struct
1525 *
1526 * Return value:
1527 * none
1528 **/
1529 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1530 {
1531 ipr_log_vpd(&vpd->vpd);
1532 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1533 be32_to_cpu(vpd->wwid[1]));
1534 }
1535
1536 /**
1537 * ipr_log_enhanced_cache_error - Log a cache error.
1538 * @ioa_cfg: ioa config struct
1539 * @hostrcb: hostrcb struct
1540 *
1541 * Return value:
1542 * none
1543 **/
1544 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1545 struct ipr_hostrcb *hostrcb)
1546 {
1547 struct ipr_hostrcb_type_12_error *error;
1548
1549 if (ioa_cfg->sis64)
1550 error = &hostrcb->hcam.u.error64.u.type_12_error;
1551 else
1552 error = &hostrcb->hcam.u.error.u.type_12_error;
1553
1554 ipr_err("-----Current Configuration-----\n");
1555 ipr_err("Cache Directory Card Information:\n");
1556 ipr_log_ext_vpd(&error->ioa_vpd);
1557 ipr_err("Adapter Card Information:\n");
1558 ipr_log_ext_vpd(&error->cfc_vpd);
1559
1560 ipr_err("-----Expected Configuration-----\n");
1561 ipr_err("Cache Directory Card Information:\n");
1562 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1563 ipr_err("Adapter Card Information:\n");
1564 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1565
1566 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1567 be32_to_cpu(error->ioa_data[0]),
1568 be32_to_cpu(error->ioa_data[1]),
1569 be32_to_cpu(error->ioa_data[2]));
1570 }
1571
1572 /**
1573 * ipr_log_cache_error - Log a cache error.
1574 * @ioa_cfg: ioa config struct
1575 * @hostrcb: hostrcb struct
1576 *
1577 * Return value:
1578 * none
1579 **/
1580 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1581 struct ipr_hostrcb *hostrcb)
1582 {
1583 struct ipr_hostrcb_type_02_error *error =
1584 &hostrcb->hcam.u.error.u.type_02_error;
1585
1586 ipr_err("-----Current Configuration-----\n");
1587 ipr_err("Cache Directory Card Information:\n");
1588 ipr_log_vpd(&error->ioa_vpd);
1589 ipr_err("Adapter Card Information:\n");
1590 ipr_log_vpd(&error->cfc_vpd);
1591
1592 ipr_err("-----Expected Configuration-----\n");
1593 ipr_err("Cache Directory Card Information:\n");
1594 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1595 ipr_err("Adapter Card Information:\n");
1596 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1597
1598 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1599 be32_to_cpu(error->ioa_data[0]),
1600 be32_to_cpu(error->ioa_data[1]),
1601 be32_to_cpu(error->ioa_data[2]));
1602 }
1603
1604 /**
1605 * ipr_log_enhanced_config_error - Log a configuration error.
1606 * @ioa_cfg: ioa config struct
1607 * @hostrcb: hostrcb struct
1608 *
1609 * Return value:
1610 * none
1611 **/
1612 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1613 struct ipr_hostrcb *hostrcb)
1614 {
1615 int errors_logged, i;
1616 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1617 struct ipr_hostrcb_type_13_error *error;
1618
1619 error = &hostrcb->hcam.u.error.u.type_13_error;
1620 errors_logged = be32_to_cpu(error->errors_logged);
1621
1622 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1623 be32_to_cpu(error->errors_detected), errors_logged);
1624
1625 dev_entry = error->dev;
1626
1627 for (i = 0; i < errors_logged; i++, dev_entry++) {
1628 ipr_err_separator;
1629
1630 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1631 ipr_log_ext_vpd(&dev_entry->vpd);
1632
1633 ipr_err("-----New Device Information-----\n");
1634 ipr_log_ext_vpd(&dev_entry->new_vpd);
1635
1636 ipr_err("Cache Directory Card Information:\n");
1637 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1638
1639 ipr_err("Adapter Card Information:\n");
1640 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1641 }
1642 }
1643
1644 /**
1645 * ipr_log_sis64_config_error - Log a device error.
1646 * @ioa_cfg: ioa config struct
1647 * @hostrcb: hostrcb struct
1648 *
1649 * Return value:
1650 * none
1651 **/
1652 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1653 struct ipr_hostrcb *hostrcb)
1654 {
1655 int errors_logged, i;
1656 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1657 struct ipr_hostrcb_type_23_error *error;
1658 char buffer[IPR_MAX_RES_PATH_LENGTH];
1659
1660 error = &hostrcb->hcam.u.error64.u.type_23_error;
1661 errors_logged = be32_to_cpu(error->errors_logged);
1662
1663 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1664 be32_to_cpu(error->errors_detected), errors_logged);
1665
1666 dev_entry = error->dev;
1667
1668 for (i = 0; i < errors_logged; i++, dev_entry++) {
1669 ipr_err_separator;
1670
1671 ipr_err("Device %d : %s", i + 1,
1672 __ipr_format_res_path(dev_entry->res_path,
1673 buffer, sizeof(buffer)));
1674 ipr_log_ext_vpd(&dev_entry->vpd);
1675
1676 ipr_err("-----New Device Information-----\n");
1677 ipr_log_ext_vpd(&dev_entry->new_vpd);
1678
1679 ipr_err("Cache Directory Card Information:\n");
1680 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1681
1682 ipr_err("Adapter Card Information:\n");
1683 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1684 }
1685 }
1686
1687 /**
1688 * ipr_log_config_error - Log a configuration error.
1689 * @ioa_cfg: ioa config struct
1690 * @hostrcb: hostrcb struct
1691 *
1692 * Return value:
1693 * none
1694 **/
1695 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1696 struct ipr_hostrcb *hostrcb)
1697 {
1698 int errors_logged, i;
1699 struct ipr_hostrcb_device_data_entry *dev_entry;
1700 struct ipr_hostrcb_type_03_error *error;
1701
1702 error = &hostrcb->hcam.u.error.u.type_03_error;
1703 errors_logged = be32_to_cpu(error->errors_logged);
1704
1705 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1706 be32_to_cpu(error->errors_detected), errors_logged);
1707
1708 dev_entry = error->dev;
1709
1710 for (i = 0; i < errors_logged; i++, dev_entry++) {
1711 ipr_err_separator;
1712
1713 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1714 ipr_log_vpd(&dev_entry->vpd);
1715
1716 ipr_err("-----New Device Information-----\n");
1717 ipr_log_vpd(&dev_entry->new_vpd);
1718
1719 ipr_err("Cache Directory Card Information:\n");
1720 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1721
1722 ipr_err("Adapter Card Information:\n");
1723 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1724
1725 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1726 be32_to_cpu(dev_entry->ioa_data[0]),
1727 be32_to_cpu(dev_entry->ioa_data[1]),
1728 be32_to_cpu(dev_entry->ioa_data[2]),
1729 be32_to_cpu(dev_entry->ioa_data[3]),
1730 be32_to_cpu(dev_entry->ioa_data[4]));
1731 }
1732 }
1733
1734 /**
1735 * ipr_log_enhanced_array_error - Log an array configuration error.
1736 * @ioa_cfg: ioa config struct
1737 * @hostrcb: hostrcb struct
1738 *
1739 * Return value:
1740 * none
1741 **/
1742 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1743 struct ipr_hostrcb *hostrcb)
1744 {
1745 int i, num_entries;
1746 struct ipr_hostrcb_type_14_error *error;
1747 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1748 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1749
1750 error = &hostrcb->hcam.u.error.u.type_14_error;
1751
1752 ipr_err_separator;
1753
1754 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1755 error->protection_level,
1756 ioa_cfg->host->host_no,
1757 error->last_func_vset_res_addr.bus,
1758 error->last_func_vset_res_addr.target,
1759 error->last_func_vset_res_addr.lun);
1760
1761 ipr_err_separator;
1762
1763 array_entry = error->array_member;
1764 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1765 ARRAY_SIZE(error->array_member));
1766
1767 for (i = 0; i < num_entries; i++, array_entry++) {
1768 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1769 continue;
1770
1771 if (be32_to_cpu(error->exposed_mode_adn) == i)
1772 ipr_err("Exposed Array Member %d:\n", i);
1773 else
1774 ipr_err("Array Member %d:\n", i);
1775
1776 ipr_log_ext_vpd(&array_entry->vpd);
1777 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1778 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1779 "Expected Location");
1780
1781 ipr_err_separator;
1782 }
1783 }
1784
1785 /**
1786 * ipr_log_array_error - Log an array configuration error.
1787 * @ioa_cfg: ioa config struct
1788 * @hostrcb: hostrcb struct
1789 *
1790 * Return value:
1791 * none
1792 **/
1793 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1794 struct ipr_hostrcb *hostrcb)
1795 {
1796 int i;
1797 struct ipr_hostrcb_type_04_error *error;
1798 struct ipr_hostrcb_array_data_entry *array_entry;
1799 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1800
1801 error = &hostrcb->hcam.u.error.u.type_04_error;
1802
1803 ipr_err_separator;
1804
1805 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1806 error->protection_level,
1807 ioa_cfg->host->host_no,
1808 error->last_func_vset_res_addr.bus,
1809 error->last_func_vset_res_addr.target,
1810 error->last_func_vset_res_addr.lun);
1811
1812 ipr_err_separator;
1813
1814 array_entry = error->array_member;
1815
1816 for (i = 0; i < 18; i++) {
1817 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1818 continue;
1819
1820 if (be32_to_cpu(error->exposed_mode_adn) == i)
1821 ipr_err("Exposed Array Member %d:\n", i);
1822 else
1823 ipr_err("Array Member %d:\n", i);
1824
1825 ipr_log_vpd(&array_entry->vpd);
1826
1827 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1828 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1829 "Expected Location");
1830
1831 ipr_err_separator;
1832
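/* entries 0-9 are read from array_member[], 10-17 from array_member2[] */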
1833 if (i == 9)
1834 array_entry = error->array_member2;
1835 else
1836 array_entry++;
1837 }
1838 }
1839
1840 /**
1841 * ipr_log_hex_data - Log additional hex IOA error data.
1842 * @ioa_cfg: ioa config struct
1843 * @data: IOA error data
1844 * @len: data length
1845 *
1846 * Return value:
1847 * none
1848 **/
1849 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1850 {
1851 int i;
1852
1853 if (len == 0)
1854 return;
1855
1856 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1857 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1858
1859 for (i = 0; i < len / 4; i += 4) {
1860 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1861 be32_to_cpu(data[i]),
1862 be32_to_cpu(data[i+1]),
1863 be32_to_cpu(data[i+2]),
1864 be32_to_cpu(data[i+3]));
1865 }
1866 }
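/*
 * Example of the output format above (values illustrative): the first
 * column is the byte offset into the buffer and each row carries four
 * big-endian words, so consecutive rows advance by 0x10:
 *
 *	00000000: 04448200 00000000 19080000 00000002
 *	00000010: 00000000 00000000 00000000 00000000
 */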
1867
1868 /**
1869 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1870 * @ioa_cfg: ioa config struct
1871 * @hostrcb: hostrcb struct
1872 *
1873 * Return value:
1874 * none
1875 **/
1876 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1877 struct ipr_hostrcb *hostrcb)
1878 {
1879 struct ipr_hostrcb_type_17_error *error;
1880
1881 if (ioa_cfg->sis64)
1882 error = &hostrcb->hcam.u.error64.u.type_17_error;
1883 else
1884 error = &hostrcb->hcam.u.error.u.type_17_error;
1885
1886 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1887 strim(error->failure_reason);
1888
1889 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1890 be32_to_cpu(hostrcb->hcam.u.error.prc));
1891 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1892 ipr_log_hex_data(ioa_cfg, error->data,
1893 be32_to_cpu(hostrcb->hcam.length) -
1894 (offsetof(struct ipr_hostrcb_error, u) +
1895 offsetof(struct ipr_hostrcb_type_17_error, data)));
1896 }
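/*
 * Sketch of the length computation above (illustrative): hcam.length is
 * the size of the whole error record, so subtracting the offset of the
 * type-specific 'data' member within the hostrcb error layout leaves
 * exactly the number of trailing bytes to hex dump:
 *
 *	hex bytes = hcam.length - (offsetof(error union) + offsetof(data))
 */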
1897
1898 /**
1899 * ipr_log_dual_ioa_error - Log a dual adapter error.
1900 * @ioa_cfg: ioa config struct
1901 * @hostrcb: hostrcb struct
1902 *
1903 * Return value:
1904 * none
1905 **/
1906 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1907 struct ipr_hostrcb *hostrcb)
1908 {
1909 struct ipr_hostrcb_type_07_error *error;
1910
1911 error = &hostrcb->hcam.u.error.u.type_07_error;
1912 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1913 strim(error->failure_reason);
1914
1915 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1916 be32_to_cpu(hostrcb->hcam.u.error.prc));
1917 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1918 ipr_log_hex_data(ioa_cfg, error->data,
1919 be32_to_cpu(hostrcb->hcam.length) -
1920 (offsetof(struct ipr_hostrcb_error, u) +
1921 offsetof(struct ipr_hostrcb_type_07_error, data)));
1922 }
1923
1924 static const struct {
1925 u8 active;
1926 char *desc;
1927 } path_active_desc[] = {
1928 { IPR_PATH_NO_INFO, "Path" },
1929 { IPR_PATH_ACTIVE, "Active path" },
1930 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1931 };
1932
1933 static const struct {
1934 u8 state;
1935 char *desc;
1936 } path_state_desc[] = {
1937 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1938 { IPR_PATH_HEALTHY, "is healthy" },
1939 { IPR_PATH_DEGRADED, "is degraded" },
1940 { IPR_PATH_FAILED, "is failed" }
1941 };
1942
1943 /**
1944 * ipr_log_fabric_path - Log a fabric path error
1945 * @hostrcb: hostrcb struct
1946 * @fabric: fabric descriptor
1947 *
1948 * Return value:
1949 * none
1950 **/
1951 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1952 struct ipr_hostrcb_fabric_desc *fabric)
1953 {
1954 int i, j;
1955 u8 path_state = fabric->path_state;
1956 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1957 u8 state = path_state & IPR_PATH_STATE_MASK;
1958
1959 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1960 if (path_active_desc[i].active != active)
1961 continue;
1962
1963 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1964 if (path_state_desc[j].state != state)
1965 continue;
1966
1967 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1968 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1969 path_active_desc[i].desc, path_state_desc[j].desc,
1970 fabric->ioa_port);
1971 } else if (fabric->cascaded_expander == 0xff) {
1972 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1973 path_active_desc[i].desc, path_state_desc[j].desc,
1974 fabric->ioa_port, fabric->phy);
1975 } else if (fabric->phy == 0xff) {
1976 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1977 path_active_desc[i].desc, path_state_desc[j].desc,
1978 fabric->ioa_port, fabric->cascaded_expander);
1979 } else {
1980 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1981 path_active_desc[i].desc, path_state_desc[j].desc,
1982 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1983 }
1984 return;
1985 }
1986 }
1987
1988 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1989 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1990 }
1991
1992 /**
1993 * ipr_log64_fabric_path - Log a fabric path error
1994 * @hostrcb: hostrcb struct
1995 * @fabric: fabric descriptor
1996 *
1997 * Return value:
1998 * none
1999 **/
2000 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2001 struct ipr_hostrcb64_fabric_desc *fabric)
2002 {
2003 int i, j;
2004 u8 path_state = fabric->path_state;
2005 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2006 u8 state = path_state & IPR_PATH_STATE_MASK;
2007 char buffer[IPR_MAX_RES_PATH_LENGTH];
2008
2009 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2010 if (path_active_desc[i].active != active)
2011 continue;
2012
2013 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2014 if (path_state_desc[j].state != state)
2015 continue;
2016
2017 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2018 path_active_desc[i].desc, path_state_desc[j].desc,
2019 ipr_format_res_path(hostrcb->ioa_cfg,
2020 fabric->res_path,
2021 buffer, sizeof(buffer)));
2022 return;
2023 }
2024 }
2025
2026 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2027 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2028 buffer, sizeof(buffer)));
2029 }
2030
2031 static const struct {
2032 u8 type;
2033 char *desc;
2034 } path_type_desc[] = {
2035 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2036 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2037 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2038 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2039 };
2040
2041 static const struct {
2042 u8 status;
2043 char *desc;
2044 } path_status_desc[] = {
2045 { IPR_PATH_CFG_NO_PROB, "Functional" },
2046 { IPR_PATH_CFG_DEGRADED, "Degraded" },
2047 { IPR_PATH_CFG_FAILED, "Failed" },
2048 { IPR_PATH_CFG_SUSPECT, "Suspect" },
2049 { IPR_PATH_NOT_DETECTED, "Missing" },
2050 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2051 };
2052
2053 static const char *link_rate[] = {
2054 "unknown",
2055 "disabled",
2056 "phy reset problem",
2057 "spinup hold",
2058 "port selector",
2059 "unknown",
2060 "unknown",
2061 "unknown",
2062 "1.5Gbps",
2063 "3.0Gbps",
2064 "unknown",
2065 "unknown",
2066 "unknown",
2067 "unknown",
2068 "unknown",
2069 "unknown"
2070 };
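/*
 * Note (illustrative): link_rate[] is indexed by a config element's
 * link_rate field masked with IPR_PHY_LINK_RATE_MASK; the table covers
 * all 16 possible codes, with 0x8 and 0x9 being the negotiated SAS
 * 1.5 Gbps and 3.0 Gbps rates and the rest administrative or reserved.
 */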
2071
2072 /**
2073 * ipr_log_path_elem - Log a fabric path element.
2074 * @hostrcb: hostrcb struct
2075 * @cfg: fabric path element struct
2076 *
2077 * Return value:
2078 * none
2079 **/
2080 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2081 struct ipr_hostrcb_config_element *cfg)
2082 {
2083 int i, j;
2084 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2085 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2086
2087 if (type == IPR_PATH_CFG_NOT_EXIST)
2088 return;
2089
2090 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2091 if (path_type_desc[i].type != type)
2092 continue;
2093
2094 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2095 if (path_status_desc[j].status != status)
2096 continue;
2097
2098 if (type == IPR_PATH_CFG_IOA_PORT) {
2099 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2100 path_status_desc[j].desc, path_type_desc[i].desc,
2101 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2102 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2103 } else {
2104 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2105 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2106 path_status_desc[j].desc, path_type_desc[i].desc,
2107 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2108 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2109 } else if (cfg->cascaded_expander == 0xff) {
2110 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2111 "WWN=%08X%08X\n", path_status_desc[j].desc,
2112 path_type_desc[i].desc, cfg->phy,
2113 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2114 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2115 } else if (cfg->phy == 0xff) {
2116 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2117 "WWN=%08X%08X\n", path_status_desc[j].desc,
2118 path_type_desc[i].desc, cfg->cascaded_expander,
2119 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2120 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2121 } else {
2122 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2123 "WWN=%08X%08X\n", path_status_desc[j].desc,
2124 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2125 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2126 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2127 }
2128 }
2129 return;
2130 }
2131 }
2132
2133 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2134 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2135 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2136 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2137 }
2138
2139 /**
2140 * ipr_log64_path_elem - Log a fabric path element.
2141 * @hostrcb: hostrcb struct
2142 * @cfg: fabric path element struct
2143 *
2144 * Return value:
2145 * none
2146 **/
2147 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2148 struct ipr_hostrcb64_config_element *cfg)
2149 {
2150 int i, j;
2151 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2152 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2153 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2154 char buffer[IPR_MAX_RES_PATH_LENGTH];
2155
2156 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2157 return;
2158
2159 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2160 if (path_type_desc[i].type != type)
2161 continue;
2162
2163 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2164 if (path_status_desc[j].status != status)
2165 continue;
2166
2167 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2168 path_status_desc[j].desc, path_type_desc[i].desc,
2169 ipr_format_res_path(hostrcb->ioa_cfg,
2170 cfg->res_path, buffer, sizeof(buffer)),
2171 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2172 be32_to_cpu(cfg->wwid[0]),
2173 be32_to_cpu(cfg->wwid[1]));
2174 return;
2175 }
2176 }
2177 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2178 "WWN=%08X%08X\n", cfg->type_status,
2179 ipr_format_res_path(hostrcb->ioa_cfg,
2180 cfg->res_path, buffer, sizeof(buffer)),
2181 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2182 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2183 }
2184
2185 /**
2186 * ipr_log_fabric_error - Log a fabric error.
2187 * @ioa_cfg: ioa config struct
2188 * @hostrcb: hostrcb struct
2189 *
2190 * Return value:
2191 * none
2192 **/
2193 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2194 struct ipr_hostrcb *hostrcb)
2195 {
2196 struct ipr_hostrcb_type_20_error *error;
2197 struct ipr_hostrcb_fabric_desc *fabric;
2198 struct ipr_hostrcb_config_element *cfg;
2199 int i, add_len;
2200
2201 error = &hostrcb->hcam.u.error.u.type_20_error;
2202 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2203 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2204
2205 add_len = be32_to_cpu(hostrcb->hcam.length) -
2206 (offsetof(struct ipr_hostrcb_error, u) +
2207 offsetof(struct ipr_hostrcb_type_20_error, desc));
2208
2209 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2210 ipr_log_fabric_path(hostrcb, fabric);
2211 for_each_fabric_cfg(fabric, cfg)
2212 ipr_log_path_elem(hostrcb, cfg);
2213
2214 add_len -= be16_to_cpu(fabric->length);
2215 fabric = (struct ipr_hostrcb_fabric_desc *)
2216 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2217 }
2218
2219 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2220 }
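/*
 * Layout sketch (illustrative): a type 20 error record is a packed run of
 * variable-length fabric descriptors, each trailed by its config elements;
 * fabric->length steps to the next descriptor, and whatever remains after
 * the last descriptor (add_len bytes) is dumped as raw hex:
 *
 *	| fabric 0 | cfg | cfg | fabric 1 | cfg | ... | raw hex data |
 */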
2221
2222 /**
2223 * ipr_log_sis64_array_error - Log a sis64 array error.
2224 * @ioa_cfg: ioa config struct
2225 * @hostrcb: hostrcb struct
2226 *
2227 * Return value:
2228 * none
2229 **/
2230 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2231 struct ipr_hostrcb *hostrcb)
2232 {
2233 int i, num_entries;
2234 struct ipr_hostrcb_type_24_error *error;
2235 struct ipr_hostrcb64_array_data_entry *array_entry;
2236 char buffer[IPR_MAX_RES_PATH_LENGTH];
2237 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2238
2239 error = &hostrcb->hcam.u.error64.u.type_24_error;
2240
2241 ipr_err_separator;
2242
2243 ipr_err("RAID %s Array Configuration: %s\n",
2244 error->protection_level,
2245 ipr_format_res_path(ioa_cfg, error->last_res_path,
2246 buffer, sizeof(buffer)));
2247
2248 ipr_err_separator;
2249
2250 array_entry = error->array_member;
2251 num_entries = min_t(u32, error->num_entries,
2252 ARRAY_SIZE(error->array_member));
2253
2254 for (i = 0; i < num_entries; i++, array_entry++) {
2255
2256 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2257 continue;
2258
2259 if (error->exposed_mode_adn == i)
2260 ipr_err("Exposed Array Member %d:\n", i);
2261 else
2262 ipr_err("Array Member %d:\n", i);
2263
2265 ipr_log_ext_vpd(&array_entry->vpd);
2266 ipr_err("Current Location: %s\n",
2267 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2268 buffer, sizeof(buffer)));
2269 ipr_err("Expected Location: %s\n",
2270 ipr_format_res_path(ioa_cfg,
2271 array_entry->expected_res_path,
2272 buffer, sizeof(buffer)));
2273
2274 ipr_err_separator;
2275 }
2276 }
2277
2278 /**
2279 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2280 * @ioa_cfg: ioa config struct
2281 * @hostrcb: hostrcb struct
2282 *
2283 * Return value:
2284 * none
2285 **/
2286 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2287 struct ipr_hostrcb *hostrcb)
2288 {
2289 struct ipr_hostrcb_type_30_error *error;
2290 struct ipr_hostrcb64_fabric_desc *fabric;
2291 struct ipr_hostrcb64_config_element *cfg;
2292 int i, add_len;
2293
2294 error = &hostrcb->hcam.u.error64.u.type_30_error;
2295
2296 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2297 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2298
2299 add_len = be32_to_cpu(hostrcb->hcam.length) -
2300 (offsetof(struct ipr_hostrcb64_error, u) +
2301 offsetof(struct ipr_hostrcb_type_30_error, desc));
2302
2303 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2304 ipr_log64_fabric_path(hostrcb, fabric);
2305 for_each_fabric_cfg(fabric, cfg)
2306 ipr_log64_path_elem(hostrcb, cfg);
2307
2308 add_len -= be16_to_cpu(fabric->length);
2309 fabric = (struct ipr_hostrcb64_fabric_desc *)
2310 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2311 }
2312
2313 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2314 }
2315
2316 /**
2317 * ipr_log_generic_error - Log an adapter error.
2318 * @ioa_cfg: ioa config struct
2319 * @hostrcb: hostrcb struct
2320 *
2321 * Return value:
2322 * none
2323 **/
2324 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2325 struct ipr_hostrcb *hostrcb)
2326 {
2327 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2328 be32_to_cpu(hostrcb->hcam.length));
2329 }
2330
2331 /**
2332 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2333 * @ioasc: IOASC
2334 *
2335 * This function will return the index into the ipr_error_table
2336 * for the specified IOASC. If the IOASC is not in the table,
2337 * 0 will be returned, which points to the entry used for unknown errors.
2338 *
2339 * Return value:
2340 * index into the ipr_error_table
2341 **/
2342 static u32 ipr_get_error(u32 ioasc)
2343 {
2344 int i;
2345
2346 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2347 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2348 return i;
2349
2350 return 0;
2351 }
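/*
 * Usage sketch (illustrative -- ipr_handle_log_data() below performs the
 * real lookup): the IOASC is masked so related error codes share a table
 * entry, and index 0 doubles as the catch-all "unknown" entry:
 *
 *	error_index = ipr_get_error(ioasc);
 *	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
 */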
2352
2353 /**
2354 * ipr_handle_log_data - Log an adapter error.
2355 * @ioa_cfg: ioa config struct
2356 * @hostrcb: hostrcb struct
2357 *
2358 * This function logs an adapter error to the system.
2359 *
2360 * Return value:
2361 * none
2362 **/
2363 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2364 struct ipr_hostrcb *hostrcb)
2365 {
2366 u32 ioasc;
2367 int error_index;
2368
2369 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2370 return;
2371
2372 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2373 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2374
2375 if (ioa_cfg->sis64)
2376 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2377 else
2378 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2379
2380 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2381 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2382 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2383 scsi_report_bus_reset(ioa_cfg->host,
2384 hostrcb->hcam.u.error.fd_res_addr.bus);
2385 }
2386
2387 error_index = ipr_get_error(ioasc);
2388
2389 if (!ipr_error_table[error_index].log_hcam)
2390 return;
2391
2392 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2393
2394 /* Set indication we have logged an error */
2395 ioa_cfg->errors_logged++;
2396
2397 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2398 return;
2399 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2400 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2401
2402 switch (hostrcb->hcam.overlay_id) {
2403 case IPR_HOST_RCB_OVERLAY_ID_2:
2404 ipr_log_cache_error(ioa_cfg, hostrcb);
2405 break;
2406 case IPR_HOST_RCB_OVERLAY_ID_3:
2407 ipr_log_config_error(ioa_cfg, hostrcb);
2408 break;
2409 case IPR_HOST_RCB_OVERLAY_ID_4:
2410 case IPR_HOST_RCB_OVERLAY_ID_6:
2411 ipr_log_array_error(ioa_cfg, hostrcb);
2412 break;
2413 case IPR_HOST_RCB_OVERLAY_ID_7:
2414 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2415 break;
2416 case IPR_HOST_RCB_OVERLAY_ID_12:
2417 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2418 break;
2419 case IPR_HOST_RCB_OVERLAY_ID_13:
2420 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2421 break;
2422 case IPR_HOST_RCB_OVERLAY_ID_14:
2423 case IPR_HOST_RCB_OVERLAY_ID_16:
2424 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2425 break;
2426 case IPR_HOST_RCB_OVERLAY_ID_17:
2427 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2428 break;
2429 case IPR_HOST_RCB_OVERLAY_ID_20:
2430 ipr_log_fabric_error(ioa_cfg, hostrcb);
2431 break;
2432 case IPR_HOST_RCB_OVERLAY_ID_23:
2433 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2434 break;
2435 case IPR_HOST_RCB_OVERLAY_ID_24:
2436 case IPR_HOST_RCB_OVERLAY_ID_26:
2437 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2438 break;
2439 case IPR_HOST_RCB_OVERLAY_ID_30:
2440 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2441 break;
2442 case IPR_HOST_RCB_OVERLAY_ID_1:
2443 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2444 default:
2445 ipr_log_generic_error(ioa_cfg, hostrcb);
2446 break;
2447 }
2448 }
2449
2450 /**
2451 * ipr_process_error - Op done function for an adapter error log.
2452 * @ipr_cmd: ipr command struct
2453 *
2454 * This function is the op done function for an error log host
2455 * controlled async from the adapter. It will log the error and
2456 * send the HCAM back to the adapter.
2457 *
2458 * Return value:
2459 * none
2460 **/
2461 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2462 {
2463 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2464 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2465 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2466 u32 fd_ioasc;
2467
2468 if (ioa_cfg->sis64)
2469 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2470 else
2471 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2472
2473 list_del(&hostrcb->queue);
2474 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2475
2476 if (!ioasc) {
2477 ipr_handle_log_data(ioa_cfg, hostrcb);
2478 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2479 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2480 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2481 dev_err(&ioa_cfg->pdev->dev,
2482 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2483 }
2484
2485 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2486 }
2487
2488 /**
2489 * ipr_timeout - An internally generated op has timed out.
2490 * @ipr_cmd: ipr command struct
2491 *
2492 * This function blocks host requests and initiates an
2493 * adapter reset.
2494 *
2495 * Return value:
2496 * none
2497 **/
2498 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2499 {
2500 unsigned long lock_flags = 0;
2501 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2502
2503 ENTER;
2504 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2505
2506 ioa_cfg->errors_logged++;
2507 dev_err(&ioa_cfg->pdev->dev,
2508 "Adapter being reset due to command timeout.\n");
2509
2510 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2511 ioa_cfg->sdt_state = GET_DUMP;
2512
2513 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2514 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2515
2516 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2517 LEAVE;
2518 }
2519
2520 /**
2521 * ipr_oper_timeout - Adapter timed out transitioning to operational
2522 * @ipr_cmd: ipr command struct
2523 *
2524 * This function blocks host requests and initiates an
2525 * adapter reset.
2526 *
2527 * Return value:
2528 * none
2529 **/
2530 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2531 {
2532 unsigned long lock_flags = 0;
2533 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2534
2535 ENTER;
2536 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2537
2538 ioa_cfg->errors_logged++;
2539 dev_err(&ioa_cfg->pdev->dev,
2540 "Adapter timed out transitioning to operational.\n");
2541
2542 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2543 ioa_cfg->sdt_state = GET_DUMP;
2544
2545 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2546 if (ipr_fastfail)
2547 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2548 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2549 }
2550
2551 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2552 LEAVE;
2553 }
2554
2555 /**
2556 * ipr_find_ses_entry - Find matching SES in SES table
2557 * @res: resource entry struct of SES
2558 *
2559 * Return value:
2560 * pointer to SES table entry / NULL on failure
2561 **/
2562 static const struct ipr_ses_table_entry *
2563 ipr_find_ses_entry(struct ipr_resource_entry *res)
2564 {
2565 int i, j, matches;
2566 struct ipr_std_inq_vpids *vpids;
2567 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2568
2569 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2570 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2571 if (ste->compare_product_id_byte[j] == 'X') {
2572 vpids = &res->std_inq_data.vpids;
2573 if (vpids->product_id[j] == ste->product_id[j])
2574 matches++;
2575 else
2576 break;
2577 } else
2578 matches++;
2579 }
2580
2581 if (matches == IPR_PROD_ID_LEN)
2582 return ste;
2583 }
2584
2585 return NULL;
2586 }
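/*
 * Note on the matching rule above (entry values hypothetical): an 'X' in
 * compare_product_id_byte means "this byte of the product ID must match",
 * while any other character is a don't-care. An entry such as
 *
 *	{ .product_id = "HSBP05M P U2SCSI",
 *	  .compare_product_id_byte = "XXXXXXX XXXXXXXX", ... }
 *
 * would therefore accept any value in the eighth byte.
 */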
2587
2588 /**
2589 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2590 * @ioa_cfg: ioa config struct
2591 * @bus: SCSI bus
2592 * @bus_width: bus width
2593 *
2594 * Return value:
2595 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2596 * For a 2-byte (wide) SCSI bus, the data rate is twice the
2597 * clock rate (e.g. a wide-enabled bus clocked at 160 MHz can
2598 * transfer up to 320 MB/sec).
2599 **/
2600 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2601 {
2602 struct ipr_resource_entry *res;
2603 const struct ipr_ses_table_entry *ste;
2604 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2605
2606 /* Loop through each config table entry in the config table buffer */
2607 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2608 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2609 continue;
2610
2611 if (bus != res->bus)
2612 continue;
2613
2614 if (!(ste = ipr_find_ses_entry(res)))
2615 continue;
2616
2617 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2618 }
2619
2620 return max_xfer_rate;
2621 }
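/*
 * Worked example (units inferred from the comment above): with
 * max_bus_speed_limit expressed in MB/sec, a 16-bit wide bus capped at
 * 160 MB/sec yields
 *
 *	max_xfer_rate = (160 * 10) / (16 / 8) = 800,
 *
 * i.e. an 80 MHz clock, since each transfer on a wide bus moves 2 bytes.
 */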
2622
2623 /**
2624 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2625 * @ioa_cfg: ioa config struct
2626 * @max_delay: max delay in micro-seconds to wait
2627 *
2628 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2629 *
2630 * Return value:
2631 * 0 on success / other on failure
2632 **/
2633 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2634 {
2635 volatile u32 pcii_reg;
2636 int delay = 1;
2637
2638 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2639 while (delay < max_delay) {
2640 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2641
2642 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2643 return 0;
2644
2645 /* udelay cannot be used if delay is more than a few milliseconds */
2646 if ((delay / 1000) > MAX_UDELAY_MS)
2647 mdelay(delay / 1000);
2648 else
2649 udelay(delay);
2650
2651 delay += delay;
2652 }
2653 return -EIO;
2654 }
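/*
 * Note (illustrative): the poll interval doubles each iteration
 * (1, 2, 4, ... microseconds), so a responsive IOA is seen almost
 * immediately while the total time busy-waited before returning -EIO
 * is bounded by roughly twice max_delay.
 */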
2655
2656 /**
2657 * ipr_get_sis64_dump_data_section - Dump IOA memory
2658 * @ioa_cfg: ioa config struct
2659 * @start_addr: adapter address to dump
2660 * @dest: destination kernel buffer
2661 * @length_in_words: length to dump in 4 byte words
2662 *
2663 * Return value:
2664 * 0 on success
2665 **/
2666 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2667 u32 start_addr,
2668 __be32 *dest, u32 length_in_words)
2669 {
2670 int i;
2671
2672 for (i = 0; i < length_in_words; i++) {
2673 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2674 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2675 dest++;
2676 }
2677
2678 return 0;
2679 }
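/*
 * Note (illustrative): sis64 adapters expose dump memory through an
 * indirect register pair -- each word is fetched by writing its adapter
 * address to dump_addr_reg and then reading dump_data_reg back.
 */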
2680
2681 /**
2682 * ipr_get_ldump_data_section - Dump IOA memory
2683 * @ioa_cfg: ioa config struct
2684 * @start_addr: adapter address to dump
2685 * @dest: destination kernel buffer
2686 * @length_in_words: length to dump in 4 byte words
2687 *
2688 * Return value:
2689 * 0 on success / -EIO on failure
2690 **/
2691 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2692 u32 start_addr,
2693 __be32 *dest, u32 length_in_words)
2694 {
2695 volatile u32 temp_pcii_reg;
2696 int i, delay = 0;
2697
2698 if (ioa_cfg->sis64)
2699 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2700 dest, length_in_words);
2701
2702 /* Write IOA interrupt reg starting LDUMP state */
2703 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2704 ioa_cfg->regs.set_uproc_interrupt_reg32);
2705
2706 /* Wait for IO debug acknowledge */
2707 if (ipr_wait_iodbg_ack(ioa_cfg,
2708 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2709 dev_err(&ioa_cfg->pdev->dev,
2710 "IOA dump long data transfer timeout\n");
2711 return -EIO;
2712 }
2713
2714 /* Signal LDUMP interlocked - clear IO debug ack */
2715 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2716 ioa_cfg->regs.clr_interrupt_reg);
2717
2718 /* Write Mailbox with starting address */
2719 writel(start_addr, ioa_cfg->ioa_mailbox);
2720
2721 /* Signal address valid - clear IOA Reset alert */
2722 writel(IPR_UPROCI_RESET_ALERT,
2723 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2724
2725 for (i = 0; i < length_in_words; i++) {
2726 /* Wait for IO debug acknowledge */
2727 if (ipr_wait_iodbg_ack(ioa_cfg,
2728 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2729 dev_err(&ioa_cfg->pdev->dev,
2730 "IOA dump short data transfer timeout\n");
2731 return -EIO;
2732 }
2733
2734 /* Read data from mailbox and increment destination pointer */
2735 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2736 dest++;
2737
2738 /* For all but the last word of data, signal data received */
2739 if (i < (length_in_words - 1)) {
2740 /* Signal dump data received - Clear IO debug Ack */
2741 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2742 ioa_cfg->regs.clr_interrupt_reg);
2743 }
2744 }
2745
2746 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2747 writel(IPR_UPROCI_RESET_ALERT,
2748 ioa_cfg->regs.set_uproc_interrupt_reg32);
2749
2750 writel(IPR_UPROCI_IO_DEBUG_ALERT,
2751 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2752
2753 /* Signal dump data received - Clear IO debug Ack */
2754 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2755 ioa_cfg->regs.clr_interrupt_reg);
2756
2757 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2758 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2759 temp_pcii_reg =
2760 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2761
2762 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2763 return 0;
2764
2765 udelay(10);
2766 delay += 10;
2767 }
2768
2769 return 0;
2770 }
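/*
 * Handshake summary for the legacy path above (illustrative): raise
 * RESET_ALERT + IO_DEBUG_ALERT, wait for the IOA's debug acknowledge,
 * write the start address to the mailbox, then read one word from the
 * mailbox per acknowledge, and finally re-raise RESET_ALERT to signal
 * the end of the block transfer.
 */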
2771
2772 #ifdef CONFIG_SCSI_IPR_DUMP
2773 /**
2774 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2775 * @ioa_cfg: ioa config struct
2776 * @pci_address: adapter address
2777 * @length: length of data to copy
2778 *
2779 * Copy data from PCI adapter to kernel buffer.
2780 * Note: length MUST be a 4 byte multiple
2781 * Return value:
2782 * 0 on success / other on failure
2783 **/
2784 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2785 unsigned long pci_address, u32 length)
2786 {
2787 int bytes_copied = 0;
2788 int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2789 __be32 *page;
2790 unsigned long lock_flags = 0;
2791 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2792
2793 if (ioa_cfg->sis64)
2794 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2795 else
2796 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2797
2798 while (bytes_copied < length &&
2799 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2800 if (ioa_dump->page_offset >= PAGE_SIZE ||
2801 ioa_dump->page_offset == 0) {
2802 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2803
2804 if (!page) {
2805 ipr_trace;
2806 return bytes_copied;
2807 }
2808
2809 ioa_dump->page_offset = 0;
2810 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2811 ioa_dump->next_page_index++;
2812 } else
2813 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2814
2815 rem_len = length - bytes_copied;
2816 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2817 cur_len = min(rem_len, rem_page_len);
2818
2819 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2820 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2821 rc = -EIO;
2822 } else {
2823 rc = ipr_get_ldump_data_section(ioa_cfg,
2824 pci_address + bytes_copied,
2825 &page[ioa_dump->page_offset / 4],
2826 (cur_len / sizeof(u32)));
2827 }
2828 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2829
2830 if (!rc) {
2831 ioa_dump->page_offset += cur_len;
2832 bytes_copied += cur_len;
2833 } else {
2834 ipr_trace;
2835 break;
2836 }
2837 schedule();
2838 }
2839
2840 return bytes_copied;
2841 }
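/*
 * Note (illustrative): dump data is staged into individually allocated
 * pages tracked in ioa_dump->ioa_data[]; the host lock is held only
 * around each ldump section read, and the schedule() between chunks
 * keeps the CPU available while large dumps are copied.
 */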
2842
2843 /**
2844 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2845 * @hdr: dump entry header struct
2846 *
2847 * Return value:
2848 * nothing
2849 **/
2850 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2851 {
2852 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2853 hdr->num_elems = 1;
2854 hdr->offset = sizeof(*hdr);
2855 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2856 }
2857
2858 /**
2859 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2860 * @ioa_cfg: ioa config struct
2861 * @driver_dump: driver dump struct
2862 *
2863 * Return value:
2864 * nothing
2865 **/
2866 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2867 struct ipr_driver_dump *driver_dump)
2868 {
2869 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2870
2871 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2872 driver_dump->ioa_type_entry.hdr.len =
2873 sizeof(struct ipr_dump_ioa_type_entry) -
2874 sizeof(struct ipr_dump_entry_header);
2875 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2876 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2877 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2878 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2879 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2880 ucode_vpd->minor_release[1];
2881 driver_dump->hdr.num_entries++;
2882 }
2883
2884 /**
2885 * ipr_dump_version_data - Fill in the driver version in the dump.
2886 * @ioa_cfg: ioa config struct
2887 * @driver_dump: driver dump struct
2888 *
2889 * Return value:
2890 * nothing
2891 **/
2892 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2893 struct ipr_driver_dump *driver_dump)
2894 {
2895 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2896 driver_dump->version_entry.hdr.len =
2897 sizeof(struct ipr_dump_version_entry) -
2898 sizeof(struct ipr_dump_entry_header);
2899 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2900 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2901 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2902 driver_dump->hdr.num_entries++;
2903 }
2904
2905 /**
2906 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2907 * @ioa_cfg: ioa config struct
2908 * @driver_dump: driver dump struct
2909 *
2910 * Return value:
2911 * nothing
2912 **/
2913 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2914 struct ipr_driver_dump *driver_dump)
2915 {
2916 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2917 driver_dump->trace_entry.hdr.len =
2918 sizeof(struct ipr_dump_trace_entry) -
2919 sizeof(struct ipr_dump_entry_header);
2920 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2921 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2922 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2923 driver_dump->hdr.num_entries++;
2924 }
2925
2926 /**
2927 * ipr_dump_location_data - Fill in the IOA location in the dump.
2928 * @ioa_cfg: ioa config struct
2929 * @driver_dump: driver dump struct
2930 *
2931 * Return value:
2932 * nothing
2933 **/
2934 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2935 struct ipr_driver_dump *driver_dump)
2936 {
2937 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2938 driver_dump->location_entry.hdr.len =
2939 sizeof(struct ipr_dump_location_entry) -
2940 sizeof(struct ipr_dump_entry_header);
2941 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2942 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
2943 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
2944 driver_dump->hdr.num_entries++;
2945 }
2946
2947 /**
2948 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2949 * @ioa_cfg: ioa config struct
2950 * @dump: dump struct
2951 *
2952 * Return value:
2953 * nothing
2954 **/
2955 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2956 {
2957 unsigned long start_addr, sdt_word;
2958 unsigned long lock_flags = 0;
2959 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2960 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2961 u32 num_entries, max_num_entries, start_off, end_off;
2962 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
2963 struct ipr_sdt *sdt;
2964 int valid = 1;
2965 int i;
2966
2967 ENTER;
2968
2969 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2970
2971 if (ioa_cfg->sdt_state != READ_DUMP) {
2972 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2973 return;
2974 }
2975
2976 if (ioa_cfg->sis64) {
2977 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2978 ssleep(IPR_DUMP_DELAY_SECONDS);
2979 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2980 }
2981
2982 start_addr = readl(ioa_cfg->ioa_mailbox);
2983
2984 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
2985 dev_err(&ioa_cfg->pdev->dev,
2986 "Invalid dump table format: %lx\n", start_addr);
2987 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2988 return;
2989 }
2990
2991 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2992
2993 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2994
2995 /* Initialize the overall dump header */
2996 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
2997 driver_dump->hdr.num_entries = 1;
2998 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
2999 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3000 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3001 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3002
3003 ipr_dump_version_data(ioa_cfg, driver_dump);
3004 ipr_dump_location_data(ioa_cfg, driver_dump);
3005 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3006 ipr_dump_trace_data(ioa_cfg, driver_dump);
3007
3008 /* Update dump_header */
3009 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3010
3011 /* IOA Dump entry */
3012 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3013 ioa_dump->hdr.len = 0;
3014 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3015 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3016
3017 /* First entries in sdt are actually a list of dump addresses and
3018 * lengths to gather the real dump data. sdt represents the pointer
3019 * to the ioa generated dump table. Dump data will be extracted based
3020 * on entries in this table */
3021 sdt = &ioa_dump->sdt;
3022
3023 if (ioa_cfg->sis64) {
3024 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3025 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3026 } else {
3027 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3028 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3029 }
3030
3031 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3032 (max_num_entries * sizeof(struct ipr_sdt_entry));
3033 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3034 bytes_to_copy / sizeof(__be32));
3035
3036 /* Smart Dump table is ready to use and the first entry is valid */
3037 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3038 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3039 dev_err(&ioa_cfg->pdev->dev,
3040 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3041 rc, be32_to_cpu(sdt->hdr.state));
3042 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3043 ioa_cfg->sdt_state = DUMP_OBTAINED;
3044 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3045 return;
3046 }
3047
3048 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3049
3050 if (num_entries > max_num_entries)
3051 num_entries = max_num_entries;
3052
3053 /* Update dump length to the actual data to be copied */
3054 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3055 if (ioa_cfg->sis64)
3056 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3057 else
3058 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3059
3060 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3061
3062 for (i = 0; i < num_entries; i++) {
3063 if (ioa_dump->hdr.len > max_dump_size) {
3064 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3065 break;
3066 }
3067
3068 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3069 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3070 if (ioa_cfg->sis64)
3071 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3072 else {
3073 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3074 end_off = be32_to_cpu(sdt->entry[i].end_token);
3075
3076 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3077 bytes_to_copy = end_off - start_off;
3078 else
3079 valid = 0;
3080 }
3081 if (valid) {
3082 if (bytes_to_copy > max_dump_size) {
3083 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3084 continue;
3085 }
3086
3087 /* Copy data from adapter to driver buffers */
3088 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3089 bytes_to_copy);
3090
3091 ioa_dump->hdr.len += bytes_copied;
3092
3093 if (bytes_copied != bytes_to_copy) {
3094 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3095 break;
3096 }
3097 }
3098 }
3099 }
3100
3101 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3102
3103 /* Update dump_header */
3104 driver_dump->hdr.len += ioa_dump->hdr.len;
3105 wmb();
3106 ioa_cfg->sdt_state = DUMP_OBTAINED;
3107 LEAVE;
3108 }
3109
3110 #else
3111 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3112 #endif
3113
3114 /**
3115 * ipr_release_dump - Free adapter dump memory
3116 * @kref: kref struct
3117 *
3118 * Return value:
3119 * nothing
3120 **/
3121 static void ipr_release_dump(struct kref *kref)
3122 {
3123 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3124 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3125 unsigned long lock_flags = 0;
3126 int i;
3127
3128 ENTER;
3129 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3130 ioa_cfg->dump = NULL;
3131 ioa_cfg->sdt_state = INACTIVE;
3132 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3133
3134 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3135 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3136
3137 vfree(dump->ioa_dump.ioa_data);
3138 kfree(dump);
3139 LEAVE;
3140 }
3141
3142 /**
3143 * ipr_worker_thread - Worker thread
3144 * @work: ioa config struct
3145 *
3146 * Called at task level from a work thread. This function takes care
3147 * of adding and removing devices from the mid-layer as configuration
3148 * changes are detected by the adapter.
3149 *
3150 * Return value:
3151 * nothing
3152 **/
3153 static void ipr_worker_thread(struct work_struct *work)
3154 {
3155 unsigned long lock_flags;
3156 struct ipr_resource_entry *res;
3157 struct scsi_device *sdev;
3158 struct ipr_dump *dump;
3159 struct ipr_ioa_cfg *ioa_cfg =
3160 container_of(work, struct ipr_ioa_cfg, work_q);
3161 u8 bus, target, lun;
3162 int did_work;
3163
3164 ENTER;
3165 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3166
3167 if (ioa_cfg->sdt_state == READ_DUMP) {
3168 dump = ioa_cfg->dump;
3169 if (!dump) {
3170 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3171 return;
3172 }
3173 kref_get(&dump->kref);
3174 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3175 ipr_get_ioa_dump(ioa_cfg, dump);
3176 kref_put(&dump->kref, ipr_release_dump);
3177
3178 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3179 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3180 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3181 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3182 return;
3183 }
3184
3185 restart:
3186 do {
3187 did_work = 0;
3188 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
3189 !ioa_cfg->allow_ml_add_del) {
3190 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3191 return;
3192 }
3193
3194 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3195 if (res->del_from_ml && res->sdev) {
3196 did_work = 1;
3197 sdev = res->sdev;
3198 if (!scsi_device_get(sdev)) {
3199 if (!res->add_to_ml)
3200 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3201 else
3202 res->del_from_ml = 0;
3203 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3204 scsi_remove_device(sdev);
3205 scsi_device_put(sdev);
3206 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3207 }
3208 break;
3209 }
3210 }
3211 } while (did_work);
3212
3213 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3214 if (res->add_to_ml) {
3215 bus = res->bus;
3216 target = res->target;
3217 lun = res->lun;
3218 res->add_to_ml = 0;
3219 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3220 scsi_add_device(ioa_cfg->host, bus, target, lun);
3221 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3222 goto restart;
3223 }
3224 }
3225
3226 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3227 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3228 LEAVE;
3229 }
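/*
 * Note (illustrative): scsi_add_device() and scsi_remove_device() can
 * sleep and may re-enter the driver, so the host lock is dropped around
 * each call; the scan then restarts from the head of used_res_q because
 * the resource list may have changed while the lock was released.
 */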
3230
3231 #ifdef CONFIG_SCSI_IPR_TRACE
3232 /**
3233 * ipr_read_trace - Dump the adapter trace
3234 * @filp: open sysfs file
3235 * @kobj: kobject struct
3236 * @bin_attr: bin_attribute struct
3237 * @buf: buffer
3238 * @off: offset
3239 * @count: buffer size
3240 *
3241 * Return value:
3242 * number of bytes printed to buffer
3243 **/
3244 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3245 struct bin_attribute *bin_attr,
3246 char *buf, loff_t off, size_t count)
3247 {
3248 struct device *dev = container_of(kobj, struct device, kobj);
3249 struct Scsi_Host *shost = class_to_shost(dev);
3250 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3251 unsigned long lock_flags = 0;
3252 ssize_t ret;
3253
3254 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3255 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3256 IPR_TRACE_SIZE);
3257 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3258
3259 return ret;
3260 }
3261
3262 static struct bin_attribute ipr_trace_attr = {
3263 .attr = {
3264 .name = "trace",
3265 .mode = S_IRUGO,
3266 },
3267 .size = 0,
3268 .read = ipr_read_trace,
3269 };
3270 #endif
3271
3272 /**
3273 * ipr_show_fw_version - Show the firmware version
3274 * @dev: class device struct
3275 * @buf: buffer
3276 *
3277 * Return value:
3278 * number of bytes printed to buffer
3279 **/
3280 static ssize_t ipr_show_fw_version(struct device *dev,
3281 struct device_attribute *attr, char *buf)
3282 {
3283 struct Scsi_Host *shost = class_to_shost(dev);
3284 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3285 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3286 unsigned long lock_flags = 0;
3287 int len;
3288
3289 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3290 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3291 ucode_vpd->major_release, ucode_vpd->card_type,
3292 ucode_vpd->minor_release[0],
3293 ucode_vpd->minor_release[1]);
3294 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3295 return len;
3296 }
3297
3298 static struct device_attribute ipr_fw_version_attr = {
3299 .attr = {
3300 .name = "fw_version",
3301 .mode = S_IRUGO,
3302 },
3303 .show = ipr_show_fw_version,
3304 };
3305
3306 /**
3307 * ipr_show_log_level - Show the adapter's error logging level
3308 * @dev: class device struct
3309 * @buf: buffer
3310 *
3311 * Return value:
3312 * number of bytes printed to buffer
3313 **/
3314 static ssize_t ipr_show_log_level(struct device *dev,
3315 struct device_attribute *attr, char *buf)
3316 {
3317 struct Scsi_Host *shost = class_to_shost(dev);
3318 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3319 unsigned long lock_flags = 0;
3320 int len;
3321
3322 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3323 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3324 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3325 return len;
3326 }
3327
3328 /**
3329 * ipr_store_log_level - Change the adapter's error logging level
3330 * @dev: class device struct
3331 * @buf: buffer
3332 *
3333 * Return value:
3334 * number of bytes consumed from the buffer
3335 **/
3336 static ssize_t ipr_store_log_level(struct device *dev,
3337 struct device_attribute *attr,
3338 const char *buf, size_t count)
3339 {
3340 struct Scsi_Host *shost = class_to_shost(dev);
3341 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3342 unsigned long lock_flags = 0;
3343
3344 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3345 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3346 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3347 return strlen(buf);
3348 }
3349
3350 static struct device_attribute ipr_log_level_attr = {
3351 .attr = {
3352 .name = "log_level",
3353 .mode = S_IRUGO | S_IWUSR,
3354 },
3355 .show = ipr_show_log_level,
3356 .store = ipr_store_log_level
3357 };
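/*
 * Usage sketch (host number and value illustrative):
 *
 *	# echo 4 > /sys/class/scsi_host/host0/log_level
 *
 * Raising log_level above IPR_DEFAULT_LOG_LEVEL lets ipr_log_hex_data()
 * dump full error buffers instead of truncating them to
 * IPR_DEFAULT_MAX_ERROR_DUMP.
 */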
3358
3359 /**
3360 * ipr_store_diagnostics - IOA Diagnostics interface
3361 * @dev: device struct
3362 * @buf: buffer
3363 * @count: buffer size
3364 *
3365 * This function will reset the adapter and wait a reasonable
3366 * amount of time for any errors that the adapter might log.
3367 *
3368 * Return value:
3369 * count on success / other on failure
3370 **/
3371 static ssize_t ipr_store_diagnostics(struct device *dev,
3372 struct device_attribute *attr,
3373 const char *buf, size_t count)
3374 {
3375 struct Scsi_Host *shost = class_to_shost(dev);
3376 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3377 unsigned long lock_flags = 0;
3378 int rc = count;
3379
3380 if (!capable(CAP_SYS_ADMIN))
3381 return -EACCES;
3382
3383 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3384 while (ioa_cfg->in_reset_reload) {
3385 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3386 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3387 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3388 }
3389
3390 ioa_cfg->errors_logged = 0;
3391 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3392
3393 if (ioa_cfg->in_reset_reload) {
3394 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3395 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3396
3397 /* Wait for a second for any errors to be logged */
3398 msleep(1000);
3399 } else {
3400 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3401 return -EIO;
3402 }
3403
3404 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3405 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3406 rc = -EIO;
3407 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3408
3409 return rc;
3410 }
3411
3412 static struct device_attribute ipr_diagnostics_attr = {
3413 .attr = {
3414 .name = "run_diagnostics",
3415 .mode = S_IWUSR,
3416 },
3417 .store = ipr_store_diagnostics
3418 };
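/*
 * Usage sketch (illustrative): a write triggers a normal-shutdown reset,
 * waits for reset/reload to complete, and fails with EIO if the adapter
 * logged any errors while doing so:
 *
 *	# echo 1 > /sys/class/scsi_host/host0/run_diagnostics
 */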
3419
3420 /**
3421 * ipr_show_adapter_state - Show the adapter's state
3422 * @dev: class device struct
3423 * @buf: buffer
3424 *
3425 * Return value:
3426 * number of bytes printed to buffer
3427 **/
3428 static ssize_t ipr_show_adapter_state(struct device *dev,
3429 struct device_attribute *attr, char *buf)
3430 {
3431 struct Scsi_Host *shost = class_to_shost(dev);
3432 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3433 unsigned long lock_flags = 0;
3434 int len;
3435
3436 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3437 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3438 len = snprintf(buf, PAGE_SIZE, "offline\n");
3439 else
3440 len = snprintf(buf, PAGE_SIZE, "online\n");
3441 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3442 return len;
3443 }
3444
3445 /**
3446 * ipr_store_adapter_state - Change adapter state
3447 * @dev: device struct
3448 * @buf: buffer
3449 * @count: buffer size
3450 *
3451 * This function will change the adapter's state.
3452 *
3453 * Return value:
3454 * count on success / other on failure
3455 **/
3456 static ssize_t ipr_store_adapter_state(struct device *dev,
3457 struct device_attribute *attr,
3458 const char *buf, size_t count)
3459 {
3460 struct Scsi_Host *shost = class_to_shost(dev);
3461 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3462 unsigned long lock_flags;
3463 int result = count, i;
3464
3465 if (!capable(CAP_SYS_ADMIN))
3466 return -EACCES;
3467
3468 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3469 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3470 !strncmp(buf, "online", 6)) {
3471 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3472 spin_lock(&ioa_cfg->hrrq[i]._lock);
3473 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3474 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3475 }
3476 wmb();
3477 ioa_cfg->reset_retries = 0;
3478 ioa_cfg->in_ioa_bringdown = 0;
3479 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3480 }
3481 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3482 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3483
3484 return result;
3485 }
3486
3487 static struct device_attribute ipr_ioa_state_attr = {
3488 .attr = {
3489 .name = "online_state",
3490 .mode = S_IRUGO | S_IWUSR,
3491 },
3492 .show = ipr_show_adapter_state,
3493 .store = ipr_store_adapter_state
3494 };
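/*
 * Usage sketch (host0 is a hypothetical host number). Only the
 * "online" transition is acted upon, and only if the adapter is
 * currently marked dead:
 *
 *	echo online > /sys/class/scsi_host/host0/online_state
 */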
3495
3496 /**
3497 * ipr_store_reset_adapter - Reset the adapter
3498 * @dev: device struct
3499 * @buf: buffer
3500 * @count: buffer size
3501 *
3502 * This function will reset the adapter.
3503 *
3504 * Return value:
3505 * count on success / other on failure
3506 **/
3507 static ssize_t ipr_store_reset_adapter(struct device *dev,
3508 struct device_attribute *attr,
3509 const char *buf, size_t count)
3510 {
3511 struct Scsi_Host *shost = class_to_shost(dev);
3512 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3513 unsigned long lock_flags;
3514 int result = count;
3515
3516 if (!capable(CAP_SYS_ADMIN))
3517 return -EACCES;
3518
3519 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3520 if (!ioa_cfg->in_reset_reload)
3521 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3522 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3523 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3524
3525 return result;
3526 }
3527
3528 static struct device_attribute ipr_ioa_reset_attr = {
3529 .attr = {
3530 .name = "reset_host",
3531 .mode = S_IWUSR,
3532 },
3533 .store = ipr_store_reset_adapter
3534 };
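/*
 * Usage sketch (host0 is a hypothetical host number; the written value
 * is ignored):
 *
 *	echo 1 > /sys/class/scsi_host/host0/reset_host
 *
 * The write returns once the reset/reload has completed.
 */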
3535
3536 static int ipr_iopoll(struct blk_iopoll *iop, int budget);
3537 /**
3538 * ipr_show_iopoll_weight - Show ipr polling mode
3539 * @dev: class device struct
3540 * @buf: buffer
3541 *
3542 * Return value:
3543 * number of bytes printed to buffer
3544 **/
3545 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3546 struct device_attribute *attr, char *buf)
3547 {
3548 struct Scsi_Host *shost = class_to_shost(dev);
3549 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3550 unsigned long lock_flags = 0;
3551 int len;
3552
3553 spin_lock_irqsave(shost->host_lock, lock_flags);
3554 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3555 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3556
3557 return len;
3558 }
3559
3560 /**
3561 * ipr_store_iopoll_weight - Change the adapter's polling mode
3562 * @dev: class device struct
3563 * @buf: buffer
3564 *
3565 * Return value:
3566 * strlen(buf) on success / -EINVAL on failure
3567 **/
3568 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3569 struct device_attribute *attr,
3570 const char *buf, size_t count)
3571 {
3572 struct Scsi_Host *shost = class_to_shost(dev);
3573 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3574 unsigned long user_iopoll_weight;
3575 unsigned long lock_flags = 0;
3576 int i;
3577
3578 if (!ioa_cfg->sis64) {
3579 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
3580 return -EINVAL;
3581 }
3582 if (kstrtoul(buf, 10, &user_iopoll_weight))
3583 return -EINVAL;
3584
3585 if (user_iopoll_weight > 256) {
3586 dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must be less than 256\n");
3587 return -EINVAL;
3588 }
3589
3590 if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3591 dev_info(&ioa_cfg->pdev->dev, "Current blk-iopoll weight has the same weight\n");
3592 return strlen(buf);
3593 }
3594
3595 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
3596 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3597 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3598 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
3599 }
3600
3601 spin_lock_irqsave(shost->host_lock, lock_flags);
3602 ioa_cfg->iopoll_weight = user_iopoll_weight;
3603 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
3604 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3605 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3606 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
3607 ioa_cfg->iopoll_weight, ipr_iopoll);
3608 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
3609 }
3610 }
3611 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3612
3613 return strlen(buf);
3614 }
3615
3616 static struct device_attribute ipr_iopoll_weight_attr = {
3617 .attr = {
3618 .name = "iopoll_weight",
3619 .mode = S_IRUGO | S_IWUSR,
3620 },
3621 .show = ipr_show_iopoll_weight,
3622 .store = ipr_store_iopoll_weight
3623 };
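/*
 * Usage sketch (host0 is a hypothetical host number and 64 just an
 * example weight; values 0 through 256 are accepted, SIS-64 adapters
 * only):
 *
 *	echo 64 > /sys/class/scsi_host/host0/iopoll_weight
 *
 * A weight of 0 leaves blk-iopoll disabled, so completions are handled
 * purely from interrupt context.
 */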
3624
3625 /**
3626 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3627 * @buf_len: buffer length
3628 *
3629 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3630 * list to use for microcode download
3631 *
3632 * Return value:
3633 * pointer to sglist / NULL on failure
3634 **/
3635 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3636 {
3637 int sg_size, order, bsize_elem, num_elem, i, j;
3638 struct ipr_sglist *sglist;
3639 struct scatterlist *scatterlist;
3640 struct page *page;
3641
3642 /* Get the minimum size per scatter/gather element */
3643 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3644
3645 /* Get the actual size per element */
3646 order = get_order(sg_size);
3647
3648 /* Determine the actual number of bytes per element */
3649 bsize_elem = PAGE_SIZE * (1 << order);
3650
3651 /* Determine the actual number of sg entries needed */
3652 if (buf_len % bsize_elem)
3653 num_elem = (buf_len / bsize_elem) + 1;
3654 else
3655 num_elem = buf_len / bsize_elem;
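	/*
	 * Worked example, assuming 4K pages and IPR_MAX_SGLIST == 64:
	 * a 1 MB image gives sg_size = 1048576 / 63 ~= 16.6 KB, so
	 * order = 3 and bsize_elem = 32 KB, which divides the image
	 * into num_elem = 32 equally sized chunks.
	 */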
3656
3657 /* Allocate a scatter/gather list for the DMA */
3658 sglist = kzalloc(sizeof(struct ipr_sglist) +
3659 (sizeof(struct scatterlist) * (num_elem - 1)),
3660 GFP_KERNEL);
3661
3662 if (sglist == NULL) {
3663 ipr_trace;
3664 return NULL;
3665 }
3666
3667 scatterlist = sglist->scatterlist;
3668 sg_init_table(scatterlist, num_elem);
3669
3670 sglist->order = order;
3671 sglist->num_sg = num_elem;
3672
3673 /* Allocate a bunch of sg elements */
3674 for (i = 0; i < num_elem; i++) {
3675 page = alloc_pages(GFP_KERNEL, order);
3676 if (!page) {
3677 ipr_trace;
3678
3679 /* Free up what we already allocated */
3680 for (j = i - 1; j >= 0; j--)
3681 __free_pages(sg_page(&scatterlist[j]), order);
3682 kfree(sglist);
3683 return NULL;
3684 }
3685
3686 sg_set_page(&scatterlist[i], page, 0, 0);
3687 }
3688
3689 return sglist;
3690 }
3691
3692 /**
3693 * ipr_free_ucode_buffer - Frees a microcode download buffer
3694 * @sglist: scatter/gather list pointer
3695 *
3696 * Free a DMA'able ucode download buffer previously allocated with
3697 * ipr_alloc_ucode_buffer
3698 *
3699 * Return value:
3700 * nothing
3701 **/
3702 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3703 {
3704 int i;
3705
3706 for (i = 0; i < sglist->num_sg; i++)
3707 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3708
3709 kfree(sglist);
3710 }
3711
3712 /**
3713 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3714 * @sglist: scatter/gather list pointer
3715 * @buffer: buffer pointer
3716 * @len: buffer length
3717 *
3718 * Copy a microcode image from a user buffer into a buffer allocated by
3719 * ipr_alloc_ucode_buffer
3720 *
3721 * Return value:
3722 * 0 on success / other on failure
3723 **/
3724 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3725 u8 *buffer, u32 len)
3726 {
3727 int bsize_elem, i, result = 0;
3728 struct scatterlist *scatterlist;
3729 void *kaddr;
3730
3731 /* Determine the actual number of bytes per element */
3732 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3733
3734 scatterlist = sglist->scatterlist;
3735
3736 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3737 struct page *page = sg_page(&scatterlist[i]);
3738
3739 kaddr = kmap(page);
3740 memcpy(kaddr, buffer, bsize_elem);
3741 kunmap(page);
3742
3743 scatterlist[i].length = bsize_elem;
3744
3749 }
3750
3751 if (len % bsize_elem) {
3752 struct page *page = sg_page(&scatterlist[i]);
3753
3754 kaddr = kmap(page);
3755 memcpy(kaddr, buffer, len % bsize_elem);
3756 kunmap(page);
3757
3758 scatterlist[i].length = len % bsize_elem;
3759 }
3760
3761 sglist->buffer_len = len;
3762 return result;
3763 }
3764
3765 /**
3766 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3767 * @ipr_cmd: ipr command struct
3768 * @sglist: scatter/gather list
3769 *
3770 * Builds a microcode download IOA data list (IOADL).
3771 *
3772 **/
3773 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3774 struct ipr_sglist *sglist)
3775 {
3776 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3777 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3778 struct scatterlist *scatterlist = sglist->scatterlist;
3779 int i;
3780
3781 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3782 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3783 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3784
3785 ioarcb->ioadl_len =
3786 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3787 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3788 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3789 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3790 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3791 }
3792
3793 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3794 }
3795
3796 /**
3797 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3798 * @ipr_cmd: ipr command struct
3799 * @sglist: scatter/gather list
3800 *
3801 * Builds a microcode download IOA data list (IOADL).
3802 *
3803 **/
3804 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3805 struct ipr_sglist *sglist)
3806 {
3807 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3808 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3809 struct scatterlist *scatterlist = sglist->scatterlist;
3810 int i;
3811
3812 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3813 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3814 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3815
3816 ioarcb->ioadl_len =
3817 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3818
3819 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3820 ioadl[i].flags_and_data_len =
3821 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3822 ioadl[i].address =
3823 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3824 }
3825
3826 ioadl[i-1].flags_and_data_len |=
3827 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3828 }
3829
3830 /**
3831 * ipr_update_ioa_ucode - Update IOA's microcode
3832 * @ioa_cfg: ioa config struct
3833 * @sglist: scatter/gather list
3834 *
3835 * Initiate an adapter reset to update the IOA's microcode
3836 *
3837 * Return value:
3838 * 0 on success / -EIO on failure
3839 **/
3840 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3841 struct ipr_sglist *sglist)
3842 {
3843 unsigned long lock_flags;
3844
3845 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3846 while (ioa_cfg->in_reset_reload) {
3847 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3848 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3849 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3850 }
3851
3852 if (ioa_cfg->ucode_sglist) {
3853 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3854 dev_err(&ioa_cfg->pdev->dev,
3855 "Microcode download already in progress\n");
3856 return -EIO;
3857 }
3858
3859 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3860 sglist->num_sg, DMA_TO_DEVICE);
3861
3862 if (!sglist->num_dma_sg) {
3863 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3864 dev_err(&ioa_cfg->pdev->dev,
3865 "Failed to map microcode download buffer!\n");
3866 return -EIO;
3867 }
3868
3869 ioa_cfg->ucode_sglist = sglist;
3870 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3871 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3872 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3873
3874 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3875 ioa_cfg->ucode_sglist = NULL;
3876 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3877 return 0;
3878 }
3879
3880 /**
3881 * ipr_store_update_fw - Update the firmware on the adapter
3882 * @dev: device struct
3883 * @buf: buffer
3884 * @count: buffer size
3885 *
3886 * This function will update the firmware on the adapter.
3887 *
3888 * Return value:
3889 * count on success / other on failure
3890 **/
3891 static ssize_t ipr_store_update_fw(struct device *dev,
3892 struct device_attribute *attr,
3893 const char *buf, size_t count)
3894 {
3895 struct Scsi_Host *shost = class_to_shost(dev);
3896 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3897 struct ipr_ucode_image_header *image_hdr;
3898 const struct firmware *fw_entry;
3899 struct ipr_sglist *sglist;
3900 char fname[100];
3901 char *src;
3902 int result, dnld_size;
3903
3904 if (!capable(CAP_SYS_ADMIN))
3905 return -EACCES;
3906
3907 snprintf(fname, sizeof(fname), "%s", buf);
3908 fname[strcspn(fname, "\n")] = '\0';
3909
3910 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3911 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3912 return -EIO;
3913 }
3914
3915 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3916
3917 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3918 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3919 sglist = ipr_alloc_ucode_buffer(dnld_size);
3920
3921 if (!sglist) {
3922 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3923 release_firmware(fw_entry);
3924 return -ENOMEM;
3925 }
3926
3927 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3928
3929 if (result) {
3930 dev_err(&ioa_cfg->pdev->dev,
3931 "Microcode buffer copy to DMA buffer failed\n");
3932 goto out;
3933 }
3934
3935 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
3936
3937 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
3938
3939 if (!result)
3940 result = count;
3941 out:
3942 ipr_free_ucode_buffer(sglist);
3943 release_firmware(fw_entry);
3944 return result;
3945 }
3946
3947 static struct device_attribute ipr_update_fw_attr = {
3948 .attr = {
3949 .name = "update_fw",
3950 .mode = S_IWUSR,
3951 },
3952 .store = ipr_store_update_fw
3953 };
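/*
 * Usage sketch (host0 and the image name are hypothetical; the file
 * must be visible to the firmware loader, e.g. under /lib/firmware):
 *
 *	echo ibm-ucode.img > /sys/class/scsi_host/host0/update_fw
 *
 * The write blocks for the full download and adapter reset.
 */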
3954
3955 /**
3956 * ipr_show_fw_type - Show the adapter's firmware type.
3957 * @dev: class device struct
3958 * @buf: buffer
3959 *
3960 * Return value:
3961 * number of bytes printed to buffer
3962 **/
3963 static ssize_t ipr_show_fw_type(struct device *dev,
3964 struct device_attribute *attr, char *buf)
3965 {
3966 struct Scsi_Host *shost = class_to_shost(dev);
3967 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3968 unsigned long lock_flags = 0;
3969 int len;
3970
3971 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3972 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
3973 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3974 return len;
3975 }
3976
3977 static struct device_attribute ipr_ioa_fw_type_attr = {
3978 .attr = {
3979 .name = "fw_type",
3980 .mode = S_IRUGO,
3981 },
3982 .show = ipr_show_fw_type
3983 };
3984
3985 static struct device_attribute *ipr_ioa_attrs[] = {
3986 &ipr_fw_version_attr,
3987 &ipr_log_level_attr,
3988 &ipr_diagnostics_attr,
3989 &ipr_ioa_state_attr,
3990 &ipr_ioa_reset_attr,
3991 &ipr_update_fw_attr,
3992 &ipr_ioa_fw_type_attr,
3993 &ipr_iopoll_weight_attr,
3994 NULL,
3995 };
3996
3997 #ifdef CONFIG_SCSI_IPR_DUMP
3998 /**
3999 * ipr_read_dump - Dump the adapter
4000 * @filp: open sysfs file
4001 * @kobj: kobject struct
4002 * @bin_attr: bin_attribute struct
4003 * @buf: buffer
4004 * @off: offset
4005 * @count: buffer size
4006 *
4007 * Return value:
4008 * number of bytes read / other on failure
4009 **/
4010 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4011 struct bin_attribute *bin_attr,
4012 char *buf, loff_t off, size_t count)
4013 {
4014 struct device *cdev = container_of(kobj, struct device, kobj);
4015 struct Scsi_Host *shost = class_to_shost(cdev);
4016 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4017 struct ipr_dump *dump;
4018 unsigned long lock_flags = 0;
4019 char *src;
4020 int len, sdt_end;
4021 size_t rc = count;
4022
4023 if (!capable(CAP_SYS_ADMIN))
4024 return -EACCES;
4025
4026 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4027 dump = ioa_cfg->dump;
4028
4029 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4030 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4031 return 0;
4032 }
4033 kref_get(&dump->kref);
4034 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4035
4036 if (off > dump->driver_dump.hdr.len) {
4037 kref_put(&dump->kref, ipr_release_dump);
4038 return 0;
4039 }
4040
4041 if (off + count > dump->driver_dump.hdr.len) {
4042 count = dump->driver_dump.hdr.len - off;
4043 rc = count;
4044 }
4045
4046 if (count && off < sizeof(dump->driver_dump)) {
4047 if (off + count > sizeof(dump->driver_dump))
4048 len = sizeof(dump->driver_dump) - off;
4049 else
4050 len = count;
4051 src = (u8 *)&dump->driver_dump + off;
4052 memcpy(buf, src, len);
4053 buf += len;
4054 off += len;
4055 count -= len;
4056 }
4057
4058 off -= sizeof(dump->driver_dump);
4059
4060 if (ioa_cfg->sis64)
4061 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4062 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4063 sizeof(struct ipr_sdt_entry));
4064 else
4065 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4066 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4067
4068 if (count && off < sdt_end) {
4069 if (off + count > sdt_end)
4070 len = sdt_end - off;
4071 else
4072 len = count;
4073 src = (u8 *)&dump->ioa_dump + off;
4074 memcpy(buf, src, len);
4075 buf += len;
4076 off += len;
4077 count -= len;
4078 }
4079
4080 off -= sdt_end;
4081
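	/*
	 * Whatever remains past the SDT lives in the ioa_data[] page
	 * array; copy it out one page fragment at a time so that no
	 * single memcpy() crosses a page boundary.
	 */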
4082 while (count) {
4083 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4084 len = PAGE_ALIGN(off) - off;
4085 else
4086 len = count;
4087 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4088 src += off & ~PAGE_MASK;
4089 memcpy(buf, src, len);
4090 buf += len;
4091 off += len;
4092 count -= len;
4093 }
4094
4095 kref_put(&dump->kref, ipr_release_dump);
4096 return rc;
4097 }
4098
4099 /**
4100 * ipr_alloc_dump - Prepare for adapter dump
4101 * @ioa_cfg: ioa config struct
4102 *
4103 * Return value:
4104 * 0 on success / other on failure
4105 **/
4106 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4107 {
4108 struct ipr_dump *dump;
4109 __be32 **ioa_data;
4110 unsigned long lock_flags = 0;
4111
4112 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4113
4114 if (!dump) {
4115 ipr_err("Dump memory allocation failed\n");
4116 return -ENOMEM;
4117 }
4118
4119 if (ioa_cfg->sis64)
4120 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4121 else
4122 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4123
4124 if (!ioa_data) {
4125 ipr_err("Dump memory allocation failed\n");
4126 kfree(dump);
4127 return -ENOMEM;
4128 }
4129
4130 dump->ioa_dump.ioa_data = ioa_data;
4131
4132 kref_init(&dump->kref);
4133 dump->ioa_cfg = ioa_cfg;
4134
4135 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4136
4137 if (INACTIVE != ioa_cfg->sdt_state) {
4138 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4139 vfree(dump->ioa_dump.ioa_data);
4140 kfree(dump);
4141 return 0;
4142 }
4143
4144 ioa_cfg->dump = dump;
4145 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4146 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4147 ioa_cfg->dump_taken = 1;
4148 schedule_work(&ioa_cfg->work_q);
4149 }
4150 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4151
4152 return 0;
4153 }
4154
4155 /**
4156 * ipr_free_dump - Free adapter dump memory
4157 * @ioa_cfg: ioa config struct
4158 *
4159 * Return value:
4160 * 0 on success / other on failure
4161 **/
4162 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4163 {
4164 struct ipr_dump *dump;
4165 unsigned long lock_flags = 0;
4166
4167 ENTER;
4168
4169 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4170 dump = ioa_cfg->dump;
4171 if (!dump) {
4172 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4173 return 0;
4174 }
4175
4176 ioa_cfg->dump = NULL;
4177 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4178
4179 kref_put(&dump->kref, ipr_release_dump);
4180
4181 LEAVE;
4182 return 0;
4183 }
4184
4185 /**
4186 * ipr_write_dump - Setup dump state of adapter
4187 * @filp: open sysfs file
4188 * @kobj: kobject struct
4189 * @bin_attr: bin_attribute struct
4190 * @buf: buffer
4191 * @off: offset
4192 * @count: buffer size
4193 *
4194 * Return value:
4195 * count on success / other on failure
4196 **/
4197 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4198 struct bin_attribute *bin_attr,
4199 char *buf, loff_t off, size_t count)
4200 {
4201 struct device *cdev = container_of(kobj, struct device, kobj);
4202 struct Scsi_Host *shost = class_to_shost(cdev);
4203 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4204 int rc;
4205
4206 if (!capable(CAP_SYS_ADMIN))
4207 return -EACCES;
4208
4209 if (buf[0] == '1')
4210 rc = ipr_alloc_dump(ioa_cfg);
4211 else if (buf[0] == '0')
4212 rc = ipr_free_dump(ioa_cfg);
4213 else
4214 return -EINVAL;
4215
4216 if (rc)
4217 return rc;
4218 else
4219 return count;
4220 }
4221
4222 static struct bin_attribute ipr_dump_attr = {
4223 .attr = {
4224 .name = "dump",
4225 .mode = S_IRUSR | S_IWUSR,
4226 },
4227 .size = 0,
4228 .read = ipr_read_dump,
4229 .write = ipr_write_dump
4230 };
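/*
 * Usage sketch (host0 is a hypothetical host number):
 *
 *	echo 1 > /sys/class/scsi_host/host0/dump    # prepare/request a dump
 *	cat /sys/class/scsi_host/host0/dump > f     # read it out, once obtained
 *	echo 0 > /sys/class/scsi_host/host0/dump    # free the dump buffer
 */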
4231 #else
4232 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4233 #endif
4234
4235 /**
4236 * ipr_change_queue_depth - Change the device's queue depth
4237 * @sdev: scsi device struct
4238 * @qdepth: depth to set
4239 * @reason: calling context
4240 *
4241 * Return value:
4242 * actual depth set
4243 **/
4244 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
4245 int reason)
4246 {
4247 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4248 struct ipr_resource_entry *res;
4249 unsigned long lock_flags = 0;
4250
4251 if (reason != SCSI_QDEPTH_DEFAULT)
4252 return -EOPNOTSUPP;
4253
4254 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4255 res = (struct ipr_resource_entry *)sdev->hostdata;
4256
4257 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4258 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4259 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4260
4261 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4262 return sdev->queue_depth;
4263 }
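/*
 * This handler is reached through the generic SCSI sysfs attribute,
 * for example (the device address is hypothetical):
 *
 *	echo 16 > /sys/bus/scsi/devices/0:0:0:0/queue_depth
 *
 * GATA devices are capped at IPR_MAX_CMD_PER_ATA_LUN.
 */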
4264
4265 /**
4266 * ipr_change_queue_type - Change the device's queue type
4267 * @sdev: scsi device struct
4268 * @tag_type: type of tags to use
4269 *
4270 * Return value:
4271 * actual queue type set
4272 **/
4273 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4274 {
4275 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4276 struct ipr_resource_entry *res;
4277 unsigned long lock_flags = 0;
4278
4279 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4280 res = (struct ipr_resource_entry *)sdev->hostdata;
4281
4282 if (res) {
4283 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
4284 /*
4285 * We don't bother quiescing the device here since the
4286 * adapter firmware does it for us.
4287 */
4288 scsi_set_tag_type(sdev, tag_type);
4289
4290 if (tag_type)
4291 scsi_activate_tcq(sdev, sdev->queue_depth);
4292 else
4293 scsi_deactivate_tcq(sdev, sdev->queue_depth);
4294 } else
4295 tag_type = 0;
4296 } else
4297 tag_type = 0;
4298
4299 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4300 return tag_type;
4301 }
4302
4303 /**
4304 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4305 * @dev: device struct
4306 * @attr: device attribute structure
4307 * @buf: buffer
4308 *
4309 * Return value:
4310 * number of bytes printed to buffer
4311 **/
4312 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4313 {
4314 struct scsi_device *sdev = to_scsi_device(dev);
4315 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4316 struct ipr_resource_entry *res;
4317 unsigned long lock_flags = 0;
4318 ssize_t len = -ENXIO;
4319
4320 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4321 res = (struct ipr_resource_entry *)sdev->hostdata;
4322 if (res)
4323 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4324 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4325 return len;
4326 }
4327
4328 static struct device_attribute ipr_adapter_handle_attr = {
4329 .attr = {
4330 .name = "adapter_handle",
4331 .mode = S_IRUSR,
4332 },
4333 .show = ipr_show_adapter_handle
4334 };
4335
4336 /**
4337 * ipr_show_resource_path - Show the resource path or the resource address for
4338 * this device.
4339 * @dev: device struct
4340 * @attr: device attribute structure
4341 * @buf: buffer
4342 *
4343 * Return value:
4344 * number of bytes printed to buffer
4345 **/
4346 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4347 {
4348 struct scsi_device *sdev = to_scsi_device(dev);
4349 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4350 struct ipr_resource_entry *res;
4351 unsigned long lock_flags = 0;
4352 ssize_t len = -ENXIO;
4353 char buffer[IPR_MAX_RES_PATH_LENGTH];
4354
4355 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4356 res = (struct ipr_resource_entry *)sdev->hostdata;
4357 if (res && ioa_cfg->sis64)
4358 len = snprintf(buf, PAGE_SIZE, "%s\n",
4359 __ipr_format_res_path(res->res_path, buffer,
4360 sizeof(buffer)));
4361 else if (res)
4362 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4363 res->bus, res->target, res->lun);
4364
4365 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4366 return len;
4367 }
4368
4369 static struct device_attribute ipr_resource_path_attr = {
4370 .attr = {
4371 .name = "resource_path",
4372 .mode = S_IRUGO,
4373 },
4374 .show = ipr_show_resource_path
4375 };
4376
4377 /**
4378 * ipr_show_device_id - Show the device_id for this device.
4379 * @dev: device struct
4380 * @attr: device attribute structure
4381 * @buf: buffer
4382 *
4383 * Return value:
4384 * number of bytes printed to buffer
4385 **/
4386 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4387 {
4388 struct scsi_device *sdev = to_scsi_device(dev);
4389 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4390 struct ipr_resource_entry *res;
4391 unsigned long lock_flags = 0;
4392 ssize_t len = -ENXIO;
4393
4394 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4395 res = (struct ipr_resource_entry *)sdev->hostdata;
4396 if (res && ioa_cfg->sis64)
4397 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4398 else if (res)
4399 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4400
4401 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4402 return len;
4403 }
4404
4405 static struct device_attribute ipr_device_id_attr = {
4406 .attr = {
4407 .name = "device_id",
4408 .mode = S_IRUGO,
4409 },
4410 .show = ipr_show_device_id
4411 };
4412
4413 /**
4414 * ipr_show_resource_type - Show the resource type for this device.
4415 * @dev: device struct
4416 * @attr: device attribute structure
4417 * @buf: buffer
4418 *
4419 * Return value:
4420 * number of bytes printed to buffer
4421 **/
4422 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4423 {
4424 struct scsi_device *sdev = to_scsi_device(dev);
4425 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4426 struct ipr_resource_entry *res;
4427 unsigned long lock_flags = 0;
4428 ssize_t len = -ENXIO;
4429
4430 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4431 res = (struct ipr_resource_entry *)sdev->hostdata;
4432
4433 if (res)
4434 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4435
4436 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4437 return len;
4438 }
4439
4440 static struct device_attribute ipr_resource_type_attr = {
4441 .attr = {
4442 .name = "resource_type",
4443 .mode = S_IRUGO,
4444 },
4445 .show = ipr_show_resource_type
4446 };
4447
4448 static struct device_attribute *ipr_dev_attrs[] = {
4449 &ipr_adapter_handle_attr,
4450 &ipr_resource_path_attr,
4451 &ipr_device_id_attr,
4452 &ipr_resource_type_attr,
4453 NULL,
4454 };
4455
4456 /**
4457 * ipr_biosparam - Return the HSC mapping
4458 * @sdev: scsi device struct
4459 * @block_device: block device pointer
4460 * @capacity: capacity of the device
4461 * @parm: Array containing returned HSC values.
4462 *
4463 * This function generates the HSC parms that fdisk uses.
4464 * We want to make sure we return something that places partitions
4465 * on 4k boundaries for best performance with the IOA.
4466 *
4467 * Return value:
4468 * 0 on success
4469 **/
4470 static int ipr_biosparam(struct scsi_device *sdev,
4471 struct block_device *block_device,
4472 sector_t capacity, int *parm)
4473 {
4474 int heads, sectors;
4475 sector_t cylinders;
4476
4477 heads = 128;
4478 sectors = 32;
4479
4480 cylinders = capacity;
4481 sector_div(cylinders, (128 * 32));
4482
4483 /* return result */
4484 parm[0] = heads;
4485 parm[1] = sectors;
4486 parm[2] = cylinders;
4487
4488 return 0;
4489 }
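/*
 * Worked example: a hypothetical 16 GB disk (33554432 512-byte
 * sectors) reports 128 heads x 32 sectors = 4096 sectors (2 MB) per
 * cylinder, giving 33554432 / 4096 = 8192 cylinders, so partitions
 * created on cylinder boundaries stay 4K-aligned.
 */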
4490
4491 /**
4492 * ipr_find_starget - Find target based on bus/target.
4493 * @starget: scsi target struct
4494 *
4495 * Return value:
4496 * resource entry pointer if found / NULL if not found
4497 **/
4498 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4499 {
4500 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4501 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4502 struct ipr_resource_entry *res;
4503
4504 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4505 if ((res->bus == starget->channel) &&
4506 (res->target == starget->id)) {
4507 return res;
4508 }
4509 }
4510
4511 return NULL;
4512 }
4513
4514 static struct ata_port_info sata_port_info;
4515
4516 /**
4517 * ipr_target_alloc - Prepare for commands to a SCSI target
4518 * @starget: scsi target struct
4519 *
4520 * If the device is a SATA device, this function allocates an
4521 * ATA port with libata, else it does nothing.
4522 *
4523 * Return value:
4524 * 0 on success / non-0 on failure
4525 **/
4526 static int ipr_target_alloc(struct scsi_target *starget)
4527 {
4528 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4529 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4530 struct ipr_sata_port *sata_port;
4531 struct ata_port *ap;
4532 struct ipr_resource_entry *res;
4533 unsigned long lock_flags;
4534
4535 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4536 res = ipr_find_starget(starget);
4537 starget->hostdata = NULL;
4538
4539 if (res && ipr_is_gata(res)) {
4540 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4541 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4542 if (!sata_port)
4543 return -ENOMEM;
4544
4545 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4546 if (ap) {
4547 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4548 sata_port->ioa_cfg = ioa_cfg;
4549 sata_port->ap = ap;
4550 sata_port->res = res;
4551
4552 res->sata_port = sata_port;
4553 ap->private_data = sata_port;
4554 starget->hostdata = sata_port;
4555 } else {
4556 kfree(sata_port);
4557 return -ENOMEM;
4558 }
4559 }
4560 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4561
4562 return 0;
4563 }
4564
4565 /**
4566 * ipr_target_destroy - Destroy a SCSI target
4567 * @starget: scsi target struct
4568 *
4569 * If the device was a SATA device, this function frees the libata
4570 * ATA port, else it does nothing.
4571 *
4572 **/
4573 static void ipr_target_destroy(struct scsi_target *starget)
4574 {
4575 struct ipr_sata_port *sata_port = starget->hostdata;
4576 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4577 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4578
4579 if (ioa_cfg->sis64) {
4580 if (!ipr_find_starget(starget)) {
4581 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4582 clear_bit(starget->id, ioa_cfg->array_ids);
4583 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4584 clear_bit(starget->id, ioa_cfg->vset_ids);
4585 else if (starget->channel == 0)
4586 clear_bit(starget->id, ioa_cfg->target_ids);
4587 }
4588 }
4589
4590 if (sata_port) {
4591 starget->hostdata = NULL;
4592 ata_sas_port_destroy(sata_port->ap);
4593 kfree(sata_port);
4594 }
4595 }
4596
4597 /**
4598 * ipr_find_sdev - Find device based on bus/target/lun.
4599 * @sdev: scsi device struct
4600 *
4601 * Return value:
4602 * resource entry pointer if found / NULL if not found
4603 **/
4604 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4605 {
4606 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4607 struct ipr_resource_entry *res;
4608
4609 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4610 if ((res->bus == sdev->channel) &&
4611 (res->target == sdev->id) &&
4612 (res->lun == sdev->lun))
4613 return res;
4614 }
4615
4616 return NULL;
4617 }
4618
4619 /**
4620 * ipr_slave_destroy - Unconfigure a SCSI device
4621 * @sdev: scsi device struct
4622 *
4623 * Return value:
4624 * nothing
4625 **/
4626 static void ipr_slave_destroy(struct scsi_device *sdev)
4627 {
4628 struct ipr_resource_entry *res;
4629 struct ipr_ioa_cfg *ioa_cfg;
4630 unsigned long lock_flags = 0;
4631
4632 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4633
4634 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4635 res = (struct ipr_resource_entry *) sdev->hostdata;
4636 if (res) {
4637 if (res->sata_port)
4638 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4639 sdev->hostdata = NULL;
4640 res->sdev = NULL;
4641 res->sata_port = NULL;
4642 }
4643 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4644 }
4645
4646 /**
4647 * ipr_slave_configure - Configure a SCSI device
4648 * @sdev: scsi device struct
4649 *
4650 * This function configures the specified scsi device.
4651 *
4652 * Return value:
4653 * 0 on success
4654 **/
4655 static int ipr_slave_configure(struct scsi_device *sdev)
4656 {
4657 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4658 struct ipr_resource_entry *res;
4659 struct ata_port *ap = NULL;
4660 unsigned long lock_flags = 0;
4661 char buffer[IPR_MAX_RES_PATH_LENGTH];
4662
4663 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4664 res = sdev->hostdata;
4665 if (res) {
4666 if (ipr_is_af_dasd_device(res))
4667 sdev->type = TYPE_RAID;
4668 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4669 sdev->scsi_level = 4;
4670 sdev->no_uld_attach = 1;
4671 }
4672 if (ipr_is_vset_device(res)) {
4673 blk_queue_rq_timeout(sdev->request_queue,
4674 IPR_VSET_RW_TIMEOUT);
4675 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4676 }
4677 if (ipr_is_gata(res) && res->sata_port)
4678 ap = res->sata_port->ap;
4679 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4680
4681 if (ap) {
4682 scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
4683 ata_sas_slave_configure(sdev, ap);
4684 } else
4685 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
4686 if (ioa_cfg->sis64)
4687 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4688 ipr_format_res_path(ioa_cfg,
4689 res->res_path, buffer, sizeof(buffer)));
4690 return 0;
4691 }
4692 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4693 return 0;
4694 }
4695
4696 /**
4697 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4698 * @sdev: scsi device struct
4699 *
4700 * This function initializes an ATA port so that future commands
4701 * sent through queuecommand will work.
4702 *
4703 * Return value:
4704 * 0 on success
4705 **/
4706 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4707 {
4708 struct ipr_sata_port *sata_port = NULL;
4709 int rc = -ENXIO;
4710
4711 ENTER;
4712 if (sdev->sdev_target)
4713 sata_port = sdev->sdev_target->hostdata;
4714 if (sata_port) {
4715 rc = ata_sas_port_init(sata_port->ap);
4716 if (rc == 0)
4717 rc = ata_sas_sync_probe(sata_port->ap);
4718 }
4719
4720 if (rc)
4721 ipr_slave_destroy(sdev);
4722
4723 LEAVE;
4724 return rc;
4725 }
4726
4727 /**
4728 * ipr_slave_alloc - Prepare for commands to a device.
4729 * @sdev: scsi device struct
4730 *
4731 * This function saves a pointer to the resource entry
4732 * in the scsi device struct if the device exists. We
4733 * can then use this pointer in ipr_queuecommand when
4734 * handling new commands.
4735 *
4736 * Return value:
4737 * 0 on success / -ENXIO if device does not exist
4738 **/
4739 static int ipr_slave_alloc(struct scsi_device *sdev)
4740 {
4741 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4742 struct ipr_resource_entry *res;
4743 unsigned long lock_flags;
4744 int rc = -ENXIO;
4745
4746 sdev->hostdata = NULL;
4747
4748 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4749
4750 res = ipr_find_sdev(sdev);
4751 if (res) {
4752 res->sdev = sdev;
4753 res->add_to_ml = 0;
4754 res->in_erp = 0;
4755 sdev->hostdata = res;
4756 if (!ipr_is_naca_model(res))
4757 res->needs_sync_complete = 1;
4758 rc = 0;
4759 if (ipr_is_gata(res)) {
4760 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4761 return ipr_ata_slave_alloc(sdev);
4762 }
4763 }
4764
4765 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4766
4767 return rc;
4768 }
4769
4770 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4771 {
4772 struct ipr_ioa_cfg *ioa_cfg;
4773 unsigned long lock_flags = 0;
4774 int rc = SUCCESS;
4775
4776 ENTER;
4777 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4778 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4779
4780 if (!ioa_cfg->in_reset_reload) {
4781 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4782 dev_err(&ioa_cfg->pdev->dev,
4783 "Adapter being reset as a result of error recovery.\n");
4784
4785 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4786 ioa_cfg->sdt_state = GET_DUMP;
4787 }
4788
4789 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4790 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4791 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4792
4793 /* If we got hit with a host reset while we were already resetting the
4794 adapter for some reason, and that reset failed, fail the host reset too. */
4795 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4796 ipr_trace;
4797 rc = FAILED;
4798 }
4799
4800 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4801 LEAVE;
4802 return rc;
4803 }
4804
4805 /**
4806 * ipr_device_reset - Reset the device
4807 * @ioa_cfg: ioa config struct
4808 * @res: resource entry struct
4809 *
4810 * This function issues a device reset to the affected device.
4811 * If the device is a SCSI device, a LUN reset will be sent
4812 * to the device first. If that does not work, a target reset
4813 * will be sent. If the device is a SATA device, a PHY reset will
4814 * be sent.
4815 *
4816 * Return value:
4817 * 0 on success / non-zero on failure
4818 **/
4819 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4820 struct ipr_resource_entry *res)
4821 {
4822 struct ipr_cmnd *ipr_cmd;
4823 struct ipr_ioarcb *ioarcb;
4824 struct ipr_cmd_pkt *cmd_pkt;
4825 struct ipr_ioarcb_ata_regs *regs;
4826 u32 ioasc;
4827
4828 ENTER;
4829 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4830 ioarcb = &ipr_cmd->ioarcb;
4831 cmd_pkt = &ioarcb->cmd_pkt;
4832
4833 if (ipr_cmd->ioa_cfg->sis64) {
4834 regs = &ipr_cmd->i.ata_ioadl.regs;
4835 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4836 } else
4837 regs = &ioarcb->u.add_data.u.regs;
4838
4839 ioarcb->res_handle = res->res_handle;
4840 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4841 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4842 if (ipr_is_gata(res)) {
4843 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
4844 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
4845 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4846 }
4847
4848 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4849 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4850 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
4851 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4852 if (ipr_cmd->ioa_cfg->sis64)
4853 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4854 sizeof(struct ipr_ioasa_gata));
4855 else
4856 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4857 sizeof(struct ipr_ioasa_gata));
4858 }
4859
4860 LEAVE;
4861 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
4862 }
4863
4864 /**
4865 * ipr_sata_reset - Reset the SATA port
4866 * @link: SATA link to reset
4867 * @classes: class of the attached device
4868 *
4869 * This function issues a SATA phy reset to the affected ATA link.
4870 *
4871 * Return value:
4872 * 0 on success / non-zero on failure
4873 **/
4874 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
4875 unsigned long deadline)
4876 {
4877 struct ipr_sata_port *sata_port = link->ap->private_data;
4878 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4879 struct ipr_resource_entry *res;
4880 unsigned long lock_flags = 0;
4881 int rc = -ENXIO;
4882
4883 ENTER;
4884 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4885 while (ioa_cfg->in_reset_reload) {
4886 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4887 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4888 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4889 }
4890
4891 res = sata_port->res;
4892 if (res) {
4893 rc = ipr_device_reset(ioa_cfg, res);
4894 *classes = res->ata_class;
4895 }
4896
4897 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4898 LEAVE;
4899 return rc;
4900 }
4901
4902 /**
4903 * ipr_eh_dev_reset - Reset the device
4904 * @scsi_cmd: scsi command struct
4905 *
4906 * This function issues a device reset to the affected device.
4907 * A LUN reset will be sent to the device first. If that does
4908 * not work, a target reset will be sent.
4909 *
4910 * Return value:
4911 * SUCCESS / FAILED
4912 **/
4913 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
4914 {
4915 struct ipr_cmnd *ipr_cmd;
4916 struct ipr_ioa_cfg *ioa_cfg;
4917 struct ipr_resource_entry *res;
4918 struct ata_port *ap;
4919 int rc = 0;
4920 struct ipr_hrr_queue *hrrq;
4921
4922 ENTER;
4923 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4924 res = scsi_cmd->device->hostdata;
4925
4926 if (!res)
4927 return FAILED;
4928
4929 /*
4930 * If we are currently going through reset/reload, return failed. This will force the
4931 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
4932 * reset to complete
4933 */
4934 if (ioa_cfg->in_reset_reload)
4935 return FAILED;
4936 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
4937 return FAILED;
4938
4939 for_each_hrrq(hrrq, ioa_cfg) {
4940 spin_lock(&hrrq->_lock);
4941 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4942 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4943 if (ipr_cmd->scsi_cmd)
4944 ipr_cmd->done = ipr_scsi_eh_done;
4945 if (ipr_cmd->qc)
4946 ipr_cmd->done = ipr_sata_eh_done;
4947 if (ipr_cmd->qc &&
4948 !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
4949 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
4950 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
4951 }
4952 }
4953 }
4954 spin_unlock(&hrrq->_lock);
4955 }
4956 res->resetting_device = 1;
4957 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
4958
4959 if (ipr_is_gata(res) && res->sata_port) {
4960 ap = res->sata_port->ap;
4961 spin_unlock_irq(scsi_cmd->device->host->host_lock);
4962 ata_std_error_handler(ap);
4963 spin_lock_irq(scsi_cmd->device->host->host_lock);
4964
4965 for_each_hrrq(hrrq, ioa_cfg) {
4966 spin_lock(&hrrq->_lock);
4967 list_for_each_entry(ipr_cmd,
4968 &hrrq->hrrq_pending_q, queue) {
4969 if (ipr_cmd->ioarcb.res_handle ==
4970 res->res_handle) {
4971 rc = -EIO;
4972 break;
4973 }
4974 }
4975 spin_unlock(&hrrq->_lock);
4976 }
4977 } else
4978 rc = ipr_device_reset(ioa_cfg, res);
4979 res->resetting_device = 0;
4980
4981 LEAVE;
4982 return rc ? FAILED : SUCCESS;
4983 }
4984
4985 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
4986 {
4987 int rc;
4988
4989 spin_lock_irq(cmd->device->host->host_lock);
4990 rc = __ipr_eh_dev_reset(cmd);
4991 spin_unlock_irq(cmd->device->host->host_lock);
4992
4993 return rc;
4994 }
4995
4996 /**
4997 * ipr_bus_reset_done - Op done function for bus reset.
4998 * @ipr_cmd: ipr command struct
4999 *
5000 * This function is the op done function for a bus reset
5001 *
5002 * Return value:
5003 * none
5004 **/
5005 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5006 {
5007 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5008 struct ipr_resource_entry *res;
5009
5010 ENTER;
5011 if (!ioa_cfg->sis64)
5012 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5013 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5014 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5015 break;
5016 }
5017 }
5018
5019 /*
5020 * If abort has not completed, indicate the reset has, else call the
5021 * abort's done function to wake the sleeping eh thread
5022 */
5023 if (ipr_cmd->sibling->sibling)
5024 ipr_cmd->sibling->sibling = NULL;
5025 else
5026 ipr_cmd->sibling->done(ipr_cmd->sibling);
5027
5028 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5029 LEAVE;
5030 }
5031
5032 /**
5033 * ipr_abort_timeout - An abort task has timed out
5034 * @ipr_cmd: ipr command struct
5035 *
5036 * This function handles when an abort task times out. If this
5037 * happens we issue a bus reset since we have resources tied
5038 * up that must be freed before returning to the midlayer.
5039 *
5040 * Return value:
5041 * none
5042 **/
5043 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5044 {
5045 struct ipr_cmnd *reset_cmd;
5046 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5047 struct ipr_cmd_pkt *cmd_pkt;
5048 unsigned long lock_flags = 0;
5049
5050 ENTER;
5051 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5052 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5053 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5054 return;
5055 }
5056
5057 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5058 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5059 ipr_cmd->sibling = reset_cmd;
5060 reset_cmd->sibling = ipr_cmd;
5061 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5062 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5063 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5064 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5065 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5066
5067 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5068 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5069 LEAVE;
5070 }
5071
5072 /**
5073 * ipr_cancel_op - Cancel specified op
5074 * @scsi_cmd: scsi command struct
5075 *
5076 * This function cancels specified op.
5077 *
5078 * Return value:
5079 * SUCCESS / FAILED
5080 **/
5081 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5082 {
5083 struct ipr_cmnd *ipr_cmd;
5084 struct ipr_ioa_cfg *ioa_cfg;
5085 struct ipr_resource_entry *res;
5086 struct ipr_cmd_pkt *cmd_pkt;
5087 u32 ioasc, int_reg;
5088 int op_found = 0;
5089 struct ipr_hrr_queue *hrrq;
5090
5091 ENTER;
5092 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5093 res = scsi_cmd->device->hostdata;
5094
5095 /* If we are currently going through reset/reload, return failed.
5096 * This will force the mid-layer to call ipr_eh_host_reset,
5097 * which will then go to sleep and wait for the reset to complete
5098 */
5099 if (ioa_cfg->in_reset_reload ||
5100 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5101 return FAILED;
5102 if (!res)
5103 return FAILED;
5104
5105 /*
5106 * If we are aborting a timed out op, chances are that the timeout was caused
5107 * by a still not detected EEH error. In such cases, reading a register will
5108 * trigger the EEH recovery infrastructure.
5109 */
5110 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5111
5112 if (!ipr_is_gscsi(res))
5113 return FAILED;
5114
5115 for_each_hrrq(hrrq, ioa_cfg) {
5116 spin_lock(&hrrq->_lock);
5117 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5118 if (ipr_cmd->scsi_cmd == scsi_cmd) {
5119 ipr_cmd->done = ipr_scsi_eh_done;
5120 op_found = 1;
5121 break;
5122 }
5123 }
5124 spin_unlock(&hrrq->_lock);
5125 }
5126
5127 if (!op_found)
5128 return SUCCESS;
5129
5130 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5131 ipr_cmd->ioarcb.res_handle = res->res_handle;
5132 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5133 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5134 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5135 ipr_cmd->u.sdev = scsi_cmd->device;
5136
5137 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5138 scsi_cmd->cmnd[0]);
5139 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5140 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5141
5142 /*
5143 * If the abort task timed out and we sent a bus reset, we will get
5144 * one of the following responses to the abort
5145 */
5146 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5147 ioasc = 0;
5148 ipr_trace;
5149 }
5150
5151 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5152 if (!ipr_is_naca_model(res))
5153 res->needs_sync_complete = 1;
5154
5155 LEAVE;
5156 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5157 }
5158
5159 /**
5160 * ipr_eh_abort - Abort a single op
5161 * @scsi_cmd: scsi command struct
5162 *
5163 * Return value:
5164 * SUCCESS / FAILED
5165 **/
5166 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5167 {
5168 unsigned long flags;
5169 int rc;
5170
5171 ENTER;
5172
5173 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5174 rc = ipr_cancel_op(scsi_cmd);
5175 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5176
5177 LEAVE;
5178 return rc;
5179 }
5180
5181 /**
5182 * ipr_handle_other_interrupt - Handle "other" interrupts
5183 * @ioa_cfg: ioa config struct
5184 * @int_reg: interrupt register
5185 *
5186 * Return value:
5187 * IRQ_NONE / IRQ_HANDLED
5188 **/
5189 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5190 u32 int_reg)
5191 {
5192 irqreturn_t rc = IRQ_HANDLED;
5193 u32 int_mask_reg;
5194
5195 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5196 int_reg &= ~int_mask_reg;
5197
5198 /* If no adapter interrupt is actually pending, ignore it, except on
5199 * SIS-64, where a stage change interrupt may still need servicing.
5200 */
5201 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5202 if (ioa_cfg->sis64) {
5203 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5204 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5205 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5206
5207 /* clear stage change */
5208 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5209 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5210 list_del(&ioa_cfg->reset_cmd->queue);
5211 del_timer(&ioa_cfg->reset_cmd->timer);
5212 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5213 return IRQ_HANDLED;
5214 }
5215 }
5216
5217 return IRQ_NONE;
5218 }
5219
5220 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5221 /* Mask the interrupt */
5222 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5223
5224 /* Clear the interrupt */
5225 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
5226 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5227
5228 list_del(&ioa_cfg->reset_cmd->queue);
5229 del_timer(&ioa_cfg->reset_cmd->timer);
5230 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5231 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5232 if (ioa_cfg->clear_isr) {
5233 if (ipr_debug && printk_ratelimit())
5234 dev_err(&ioa_cfg->pdev->dev,
5235 "Spurious interrupt detected. 0x%08X\n", int_reg);
5236 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5237 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5238 return IRQ_NONE;
5239 }
5240 } else {
5241 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5242 ioa_cfg->ioa_unit_checked = 1;
5243 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5244 dev_err(&ioa_cfg->pdev->dev,
5245 "No Host RRQ. 0x%08X\n", int_reg);
5246 else
5247 dev_err(&ioa_cfg->pdev->dev,
5248 "Permanent IOA failure. 0x%08X\n", int_reg);
5249
5250 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5251 ioa_cfg->sdt_state = GET_DUMP;
5252
5253 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5254 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5255 }
5256
5257 return rc;
5258 }
5259
5260 /**
5261 * ipr_isr_eh - Interrupt service routine error handler
5262 * @ioa_cfg: ioa config struct
5263 * @msg: message to log, with @number appended as an error identifier
5264 *
5265 * Return value:
5266 * none
5267 **/
5268 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5269 {
5270 ioa_cfg->errors_logged++;
5271 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5272
5273 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5274 ioa_cfg->sdt_state = GET_DUMP;
5275
5276 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5277 }
5278
5279 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5280 struct list_head *doneq)
5281 {
5282 u32 ioasc;
5283 u16 cmd_index;
5284 struct ipr_cmnd *ipr_cmd;
5285 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5286 int num_hrrq = 0;
5287
5288 /* If interrupts are disabled, ignore the interrupt */
5289 if (!hrr_queue->allow_interrupts)
5290 return 0;
5291
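	/*
	 * The IOA flips the toggle bit it writes into each HRRQ entry
	 * on every wrap of the circular queue, so an entry is valid
	 * only while its toggle bit matches the value the host expects;
	 * this lets the host find the end of new responses without any
	 * register reads.
	 */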
5292 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5293 hrr_queue->toggle_bit) {
5294
5295 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5296 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5297 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5298
5299 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5300 cmd_index < hrr_queue->min_cmd_id)) {
5301 ipr_isr_eh(ioa_cfg,
5302 "Invalid response handle from IOA: ",
5303 cmd_index);
5304 break;
5305 }
5306
5307 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5308 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5309
5310 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5311
5312 list_move_tail(&ipr_cmd->queue, doneq);
5313
5314 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5315 hrr_queue->hrrq_curr++;
5316 } else {
5317 hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5318 hrr_queue->toggle_bit ^= 1u;
5319 }
5320 num_hrrq++;
5321 if (budget > 0 && num_hrrq >= budget)
5322 break;
5323 }
5324
5325 return num_hrrq;
5326 }
5327
5328 static int ipr_iopoll(struct blk_iopoll *iop, int budget)
5329 {
5330 struct ipr_ioa_cfg *ioa_cfg;
5331 struct ipr_hrr_queue *hrrq;
5332 struct ipr_cmnd *ipr_cmd, *temp;
5333 unsigned long hrrq_flags;
5334 int completed_ops;
5335 LIST_HEAD(doneq);
5336
5337 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5338 ioa_cfg = hrrq->ioa_cfg;
5339
5340 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5341 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5342
5343 if (completed_ops < budget)
5344 blk_iopoll_complete(iop);
5345 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5346
5347 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5348 list_del(&ipr_cmd->queue);
5349 del_timer(&ipr_cmd->timer);
5350 ipr_cmd->fast_done(ipr_cmd);
5351 }
5352
5353 return completed_ops;
5354 }
5355
5356 /**
5357 * ipr_isr - Interrupt service routine
5358 * @irq: irq number
5359 * @devp: pointer to hrr queue struct
5360 *
5361 * Return value:
5362 * IRQ_NONE / IRQ_HANDLED
5363 **/
5364 static irqreturn_t ipr_isr(int irq, void *devp)
5365 {
5366 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5367 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5368 unsigned long hrrq_flags = 0;
5369 u32 int_reg = 0;
5370 int num_hrrq = 0;
5371 int irq_none = 0;
5372 struct ipr_cmnd *ipr_cmd, *temp;
5373 irqreturn_t rc = IRQ_NONE;
5374 LIST_HEAD(doneq);
5375
5376 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5377 /* If interrupts are disabled, ignore the interrupt */
5378 if (!hrrq->allow_interrupts) {
5379 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5380 return IRQ_NONE;
5381 }
5382
5383 while (1) {
5384 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5385 rc = IRQ_HANDLED;
5386
5387 if (!ioa_cfg->clear_isr)
5388 break;
5389
5390 /* Clear the PCI interrupt */
5391 num_hrrq = 0;
5392 do {
5393 writel(IPR_PCII_HRRQ_UPDATED,
5394 ioa_cfg->regs.clr_interrupt_reg32);
5395 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5396 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5397 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5398
5399 } else if (rc == IRQ_NONE && irq_none == 0) {
5400 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5401 irq_none++;
5402 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5403 int_reg & IPR_PCII_HRRQ_UPDATED) {
5404 ipr_isr_eh(ioa_cfg,
5405 "Error clearing HRRQ: ", num_hrrq);
5406 rc = IRQ_HANDLED;
5407 break;
5408 } else
5409 break;
5410 }
5411
5412 if (unlikely(rc == IRQ_NONE))
5413 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5414
5415 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5416 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5417 list_del(&ipr_cmd->queue);
5418 del_timer(&ipr_cmd->timer);
5419 ipr_cmd->fast_done(ipr_cmd);
5420 }
5421 return rc;
5422 }
5423
5424 /**
5425 * ipr_isr_mhrrq - Interrupt service routine for secondary HRRQs
5426 * @irq: irq number
5427 * @devp: pointer to hrr queue struct
5428 *
5429 * Return value:
5430 * IRQ_NONE / IRQ_HANDLED
5431 **/
5432 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5433 {
5434 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5435 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5436 unsigned long hrrq_flags = 0;
5437 struct ipr_cmnd *ipr_cmd, *temp;
5438 irqreturn_t rc = IRQ_NONE;
5439 LIST_HEAD(doneq);
5440
5441 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5442
5443 /* If interrupts are disabled, ignore the interrupt */
5444 if (!hrrq->allow_interrupts) {
5445 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5446 return IRQ_NONE;
5447 }
5448
5449 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
5450 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5451 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5452 hrrq->toggle_bit) {
5453 if (!blk_iopoll_sched_prep(&hrrq->iopoll))
5454 blk_iopoll_sched(&hrrq->iopoll);
5455 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5456 return IRQ_HANDLED;
5457 }
5458 } else {
5459 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5460 hrrq->toggle_bit) {
5461 if (ipr_process_hrrq(hrrq, -1, &doneq))
5462 rc = IRQ_HANDLED;
5463 }
5464 }
5465
5466 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5467
5468 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5469 list_del(&ipr_cmd->queue);
5470 del_timer(&ipr_cmd->timer);
5471 ipr_cmd->fast_done(ipr_cmd);
5472 }
5473 return rc;
5474 }
5475
5476 /**
5477 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5478 * @ioa_cfg: ioa config struct
5479 * @ipr_cmd: ipr command struct
5480 *
5481 * Return value:
5482 * 0 on success / -1 on failure
5483 **/
5484 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5485 struct ipr_cmnd *ipr_cmd)
5486 {
5487 int i, nseg;
5488 struct scatterlist *sg;
5489 u32 length;
5490 u32 ioadl_flags = 0;
5491 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5492 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5493 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5494
5495 length = scsi_bufflen(scsi_cmd);
5496 if (!length)
5497 return 0;
5498
5499 nseg = scsi_dma_map(scsi_cmd);
5500 if (nseg < 0) {
5501 if (printk_ratelimit())
5502 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5503 return -1;
5504 }
5505
5506 ipr_cmd->dma_use_sg = nseg;
5507
5508 ioarcb->data_transfer_length = cpu_to_be32(length);
5509 ioarcb->ioadl_len =
5510 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5511
5512 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5513 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5514 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5515 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5516 ioadl_flags = IPR_IOADL_FLAGS_READ;
5517
5518 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5519 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5520 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5521 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5522 }
5523
5524 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5525 return 0;
5526 }
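/*
 * Example with hypothetical values: a two-element DMA write produces
 * two IOADL64 descriptors, and only the final one carries
 * IPR_IOADL_FLAGS_LAST (OR'ed in after the loop above):
 *
 *	ioadl64[0] = { .flags = WRITE,        .data_len = 4096, .address = addr0 }
 *	ioadl64[1] = { .flags = WRITE | LAST, .data_len = 2048, .address = addr1 }
 */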
5527
5528 /**
5529 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5530 * @ioa_cfg: ioa config struct
5531 * @ipr_cmd: ipr command struct
5532 *
5533 * Return value:
5534 * 0 on success / -1 on failure
5535 **/
5536 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5537 struct ipr_cmnd *ipr_cmd)
5538 {
5539 int i, nseg;
5540 struct scatterlist *sg;
5541 u32 length;
5542 u32 ioadl_flags = 0;
5543 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5544 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5545 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5546
5547 length = scsi_bufflen(scsi_cmd);
5548 if (!length)
5549 return 0;
5550
5551 nseg = scsi_dma_map(scsi_cmd);
5552 if (nseg < 0) {
5553 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5554 return -1;
5555 }
5556
5557 ipr_cmd->dma_use_sg = nseg;
5558
5559 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5560 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5561 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5562 ioarcb->data_transfer_length = cpu_to_be32(length);
5563 ioarcb->ioadl_len =
5564 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5565 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5566 ioadl_flags = IPR_IOADL_FLAGS_READ;
5567 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5568 ioarcb->read_ioadl_len =
5569 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5570 }
5571
5572 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5573 ioadl = ioarcb->u.add_data.u.ioadl;
5574 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5575 offsetof(struct ipr_ioarcb, u.add_data));
5576 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5577 }
5578
5579 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5580 ioadl[i].flags_and_data_len =
5581 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5582 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5583 }
5584
5585 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5586 return 0;
5587 }
5588
5589 /**
5590 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5591 * @scsi_cmd: scsi command struct
5592 *
5593 * Return value:
5594 * task attributes
5595 **/
5596 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5597 {
5598 u8 tag[2];
5599 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5600
5601 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5602 switch (tag[0]) {
5603 case MSG_SIMPLE_TAG:
5604 rc = IPR_FLAGS_LO_SIMPLE_TASK;
5605 break;
5606 case MSG_HEAD_TAG:
5607 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5608 break;
5609 case MSG_ORDERED_TAG:
5610 rc = IPR_FLAGS_LO_ORDERED_TASK;
5611 break;
5612 }
5613 }
5614
5615 return rc;
5616 }
5617
5618 /**
5619 * ipr_erp_done - Process completion of ERP for a device
5620 * @ipr_cmd: ipr command struct
5621 *
5622 * This function copies the sense buffer into the scsi_cmd
5623 * struct and pushes the scsi_done function.
5624 *
5625 * Return value:
5626 * nothing
5627 **/
5628 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5629 {
5630 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5631 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5632 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5633
5634 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5635 scsi_cmd->result |= (DID_ERROR << 16);
5636 scmd_printk(KERN_ERR, scsi_cmd,
5637 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5638 } else {
5639 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5640 SCSI_SENSE_BUFFERSIZE);
5641 }
5642
5643 if (res) {
5644 if (!ipr_is_naca_model(res))
5645 res->needs_sync_complete = 1;
5646 res->in_erp = 0;
5647 }
5648 scsi_dma_unmap(ipr_cmd->scsi_cmd);
5649 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5650 scsi_cmd->scsi_done(scsi_cmd);
5651 }
5652
5653 /**
5654 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5655 * @ipr_cmd: ipr command struct
5656 *
5657 * Return value:
5658 * none
5659 **/
5660 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5661 {
5662 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5663 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5664 dma_addr_t dma_addr = ipr_cmd->dma_addr;
5665
5666 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5667 ioarcb->data_transfer_length = 0;
5668 ioarcb->read_data_transfer_length = 0;
5669 ioarcb->ioadl_len = 0;
5670 ioarcb->read_ioadl_len = 0;
5671 ioasa->hdr.ioasc = 0;
5672 ioasa->hdr.residual_data_len = 0;
5673
5674 if (ipr_cmd->ioa_cfg->sis64)
5675 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5676 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5677 else {
5678 ioarcb->write_ioadl_addr =
5679 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5680 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5681 }
5682 }
5683
5684 /**
5685 * ipr_erp_request_sense - Send request sense to a device
5686 * @ipr_cmd: ipr command struct
5687 *
5688 * This function sends a request sense to a device as a result
5689 * of a check condition.
5690 *
5691 * Return value:
5692 * nothing
5693 **/
5694 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5695 {
5696 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5697 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5698
5699 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5700 ipr_erp_done(ipr_cmd);
5701 return;
5702 }
5703
5704 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5705
5706 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5707 cmd_pkt->cdb[0] = REQUEST_SENSE;
5708 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5709 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5710 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5711 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5712
5713 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5714 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5715
5716 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5717 IPR_REQUEST_SENSE_TIMEOUT * 2);
5718 }
5719
5720 /**
5721 * ipr_erp_cancel_all - Send cancel all to a device
5722 * @ipr_cmd: ipr command struct
5723 *
5724 * This function sends a cancel all to a device to clear the
5725 * queue. If we are running TCQ on the device, QERR is set to 1,
5726 * which means all outstanding ops have been dropped on the floor.
5727 * Cancel all will return them to us.
5728 *
5729 * Return value:
5730 * nothing
5731 **/
5732 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5733 {
5734 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5735 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5736 struct ipr_cmd_pkt *cmd_pkt;
5737
5738 res->in_erp = 1;
5739
5740 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5741
5742 if (!scsi_get_tag_type(scsi_cmd->device)) {
5743 ipr_erp_request_sense(ipr_cmd);
5744 return;
5745 }
5746
5747 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5748 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5749 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5750
5751 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5752 IPR_CANCEL_ALL_TIMEOUT);
5753 }
5754
5755 /**
5756 * ipr_dump_ioasa - Dump contents of IOASA
5757 * @ioa_cfg: ioa config struct
5758 * @ipr_cmd: ipr command struct
5759 * @res: resource entry struct
5760 *
5761 * This function is invoked by the interrupt handler when ops
5762 * fail. It will log the IOASA if appropriate. Only called
5763 * for GPDD ops.
5764 *
5765 * Return value:
5766 * none
5767 **/
5768 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5769 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5770 {
5771 int i;
5772 u16 data_len;
5773 u32 ioasc, fd_ioasc;
5774 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5775 __be32 *ioasa_data = (__be32 *)ioasa;
5776 int error_index;
5777
5778 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5779 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5780
5781 if (0 == ioasc)
5782 return;
5783
5784 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5785 return;
5786
5787 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5788 error_index = ipr_get_error(fd_ioasc);
5789 else
5790 error_index = ipr_get_error(ioasc);
5791
5792 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5793 /* Don't log an error if the IOA already logged one */
5794 if (ioasa->hdr.ilid != 0)
5795 return;
5796
5797 if (!ipr_is_gscsi(res))
5798 return;
5799
5800 if (ipr_error_table[error_index].log_ioasa == 0)
5801 return;
5802 }
5803
5804 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5805
5806 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5807 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5808 data_len = sizeof(struct ipr_ioasa64);
5809 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
5810 data_len = sizeof(struct ipr_ioasa);
5811
5812 ipr_err("IOASA Dump:\n");
5813
5814 for (i = 0; i < data_len / 4; i += 4) {
5815 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5816 be32_to_cpu(ioasa_data[i]),
5817 be32_to_cpu(ioasa_data[i+1]),
5818 be32_to_cpu(ioasa_data[i+2]),
5819 be32_to_cpu(ioasa_data[i+3]));
5820 }
5821 }
5822
5823 /**
5824 * ipr_gen_sense - Generate SCSI sense data from an IOASA
5825 * @ipr_cmd: ipr command struct
5827 *
5828 * Return value:
5829 * none
5830 **/
5831 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5832 {
5833 u32 failing_lba;
5834 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5835 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5836 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5837 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
5838
5839 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5840
5841 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5842 return;
5843
5844 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5845
5846 if (ipr_is_vset_device(res) &&
5847 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5848 ioasa->u.vset.failing_lba_hi != 0) {
5849 sense_buf[0] = 0x72;
5850 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5851 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5852 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5853
5854 sense_buf[7] = 12;
5855 sense_buf[8] = 0;
5856 sense_buf[9] = 0x0A;
5857 sense_buf[10] = 0x80;
5858
5859 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5860
5861 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5862 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5863 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5864 sense_buf[15] = failing_lba & 0x000000ff;
5865
5866 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5867
5868 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5869 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5870 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5871 sense_buf[19] = failing_lba & 0x000000ff;
5872 } else {
5873 sense_buf[0] = 0x70;
5874 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5875 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5876 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5877
5878 /* Illegal request */
5879 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
5880 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
5881 sense_buf[7] = 10; /* additional length */
5882
5883 /* IOARCB was in error */
5884 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5885 sense_buf[15] = 0xC0;
5886 else /* Parameter data was invalid */
5887 sense_buf[15] = 0x80;
5888
5889 sense_buf[16] =
5890 ((IPR_FIELD_POINTER_MASK &
5891 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
5892 sense_buf[17] =
5893 (IPR_FIELD_POINTER_MASK &
5894 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
5895 } else {
5896 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5897 if (ipr_is_vset_device(res))
5898 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5899 else
5900 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5901
5902 sense_buf[0] |= 0x80; /* Or in the Valid bit */
5903 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5904 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5905 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5906 sense_buf[6] = failing_lba & 0x000000ff;
5907 }
5908
5909 sense_buf[7] = 6; /* additional length */
5910 }
5911 }
5912 }
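/*
 * Summary of the sense layouts built above (offsets are byte indices):
 *
 * Descriptor format (response code 0x72), used when a VSET failing LBA
 * needs more than 32 bits:
 *	1-3	sense key / ASC / ASCQ
 *	7	additional sense length (12)
 *	8-11	information descriptor header (type 0x00, len 0x0A, VALID)
 *	12-19	64-bit failing LBA, big endian
 *
 * Fixed format (response code 0x70) otherwise:
 *	3-6	32-bit failing LBA, with the Valid bit set in byte 0
 *	15-17	sense-key-specific field pointer for illegal requests
 *		(0xC0 = error was in the CDB, 0x80 = in parameter data)
 */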
5913
5914 /**
5915 * ipr_get_autosense - Copy autosense data to sense buffer
5916 * @ipr_cmd: ipr command struct
5917 *
5918 * This function copies the autosense buffer to the buffer
5919 * in the scsi_cmd, if there is autosense available.
5920 *
5921 * Return value:
5922 * 1 if autosense was available / 0 if not
5923 **/
5924 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5925 {
5926 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5927 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
5928
5929 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
5930 return 0;
5931
5932 if (ipr_cmd->ioa_cfg->sis64)
5933 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
5934 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
5935 SCSI_SENSE_BUFFERSIZE));
5936 else
5937 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5938 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5939 SCSI_SENSE_BUFFERSIZE));
5940 return 1;
5941 }
5942
5943 /**
5944 * ipr_erp_start - Process an error response for a SCSI op
5945 * @ioa_cfg: ioa config struct
5946 * @ipr_cmd: ipr command struct
5947 *
5948 * This function determines whether or not to initiate ERP
5949 * on the affected device.
5950 *
5951 * Return value:
5952 * nothing
5953 **/
5954 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5955 struct ipr_cmnd *ipr_cmd)
5956 {
5957 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5958 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5959 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5960 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
5961
5962 if (!res) {
5963 ipr_scsi_eh_done(ipr_cmd);
5964 return;
5965 }
5966
5967 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
5968 ipr_gen_sense(ipr_cmd);
5969
5970 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5971
5972 switch (masked_ioasc) {
5973 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
5974 if (ipr_is_naca_model(res))
5975 scsi_cmd->result |= (DID_ABORT << 16);
5976 else
5977 scsi_cmd->result |= (DID_IMM_RETRY << 16);
5978 break;
5979 case IPR_IOASC_IR_RESOURCE_HANDLE:
5980 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
5981 scsi_cmd->result |= (DID_NO_CONNECT << 16);
5982 break;
5983 case IPR_IOASC_HW_SEL_TIMEOUT:
5984 scsi_cmd->result |= (DID_NO_CONNECT << 16);
5985 if (!ipr_is_naca_model(res))
5986 res->needs_sync_complete = 1;
5987 break;
5988 case IPR_IOASC_SYNC_REQUIRED:
5989 if (!res->in_erp)
5990 res->needs_sync_complete = 1;
5991 scsi_cmd->result |= (DID_IMM_RETRY << 16);
5992 break;
5993 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
5994 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
5995 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
5996 break;
5997 case IPR_IOASC_BUS_WAS_RESET:
5998 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
5999 /*
6000 * Report the bus reset and ask for a retry. The device
6001 * will return check condition/unit attention on the next command.
6002 */
6003 if (!res->resetting_device)
6004 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6005 scsi_cmd->result |= (DID_ERROR << 16);
6006 if (!ipr_is_naca_model(res))
6007 res->needs_sync_complete = 1;
6008 break;
6009 case IPR_IOASC_HW_DEV_BUS_STATUS:
6010 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6011 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6012 if (!ipr_get_autosense(ipr_cmd)) {
6013 if (!ipr_is_naca_model(res)) {
6014 ipr_erp_cancel_all(ipr_cmd);
6015 return;
6016 }
6017 }
6018 }
6019 if (!ipr_is_naca_model(res))
6020 res->needs_sync_complete = 1;
6021 break;
6022 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6023 break;
6024 default:
6025 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6026 scsi_cmd->result |= (DID_ERROR << 16);
6027 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6028 res->needs_sync_complete = 1;
6029 break;
6030 }
6031
6032 scsi_dma_unmap(ipr_cmd->scsi_cmd);
6033 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6034 scsi_cmd->scsi_done(scsi_cmd);
6035 }
6036
6037 /**
6038 * ipr_scsi_done - mid-layer done function
6039 * @ipr_cmd: ipr command struct
6040 *
6041 * This function is invoked by the interrupt handler for
6042 * ops generated by the SCSI mid-layer
6043 *
6044 * Return value:
6045 * none
6046 **/
6047 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6048 {
6049 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6050 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6051 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6052 unsigned long hrrq_flags;
6053
6054 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6055
6056 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6057 scsi_dma_unmap(scsi_cmd);
6058
6059 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
6060 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6061 scsi_cmd->scsi_done(scsi_cmd);
6062 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
6063 } else {
6064 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
6065 ipr_erp_start(ioa_cfg, ipr_cmd);
6066 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
6067 }
6068 }
6069
6070 /**
6071 * ipr_queuecommand - Queue a mid-layer request
6072 * @shost: scsi host struct
6073 * @scsi_cmd: scsi command struct
6074 *
6075 * This function queues a request generated by the mid-layer.
6076 *
6077 * Return value:
6078 * 0 on success
6079 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6080 * SCSI_MLQUEUE_HOST_BUSY if host is busy
6081 **/
6082 static int ipr_queuecommand(struct Scsi_Host *shost,
6083 struct scsi_cmnd *scsi_cmd)
6084 {
6085 struct ipr_ioa_cfg *ioa_cfg;
6086 struct ipr_resource_entry *res;
6087 struct ipr_ioarcb *ioarcb;
6088 struct ipr_cmnd *ipr_cmd;
6089 unsigned long hrrq_flags, lock_flags;
6090 int rc;
6091 struct ipr_hrr_queue *hrrq;
6092 int hrrq_id;
6093
6094 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6095
6096 scsi_cmd->result = (DID_OK << 16);
6097 res = scsi_cmd->device->hostdata;
6098
6099 if (ipr_is_gata(res) && res->sata_port) {
6100 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6101 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6102 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6103 return rc;
6104 }
6105
6106 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6107 hrrq = &ioa_cfg->hrrq[hrrq_id];
6108
6109 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6110 /*
6111 * We are currently blocking all devices due to a host reset.
6112 * We have told the host to stop giving us new requests, but
6113 * ERP ops don't count. FIXME
6114 */
6115 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead)) {
6116 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6117 return SCSI_MLQUEUE_HOST_BUSY;
6118 }
6119
6120 /*
6121 * FIXME - once a scsi_set_host_offline interface exists,
6122 * the ioa_is_dead check here can be removed
6123 */
6124 if (unlikely(hrrq->ioa_is_dead || !res)) {
6125 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6126 goto err_nodev;
6127 }
6128
6129 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6130 if (ipr_cmd == NULL) {
6131 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6132 return SCSI_MLQUEUE_HOST_BUSY;
6133 }
6134 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6135
6136 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6137 ioarcb = &ipr_cmd->ioarcb;
6138
6139 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6140 ipr_cmd->scsi_cmd = scsi_cmd;
6141 ipr_cmd->done = ipr_scsi_eh_done;
6142
6143 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6144 if (scsi_cmd->underflow == 0)
6145 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6146
6147 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6148 if (ipr_is_gscsi(res))
6149 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6150 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6151 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
6152 }
6153
6154 if (scsi_cmd->cmnd[0] >= 0xC0 &&
6155 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6156 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6157 }
6158
6159 if (ioa_cfg->sis64)
6160 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6161 else
6162 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6163
6164 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6165 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6166 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6167 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6168 if (!rc)
6169 scsi_dma_unmap(scsi_cmd);
6170 return SCSI_MLQUEUE_HOST_BUSY;
6171 }
6172
6173 if (unlikely(hrrq->ioa_is_dead)) {
6174 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6175 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6176 scsi_dma_unmap(scsi_cmd);
6177 goto err_nodev;
6178 }
6179
6180 ioarcb->res_handle = res->res_handle;
6181 if (res->needs_sync_complete) {
6182 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6183 res->needs_sync_complete = 0;
6184 }
6185 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6186 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6187 ipr_send_command(ipr_cmd);
6188 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6189 return 0;
6190
6191 err_nodev:
6192 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6193 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6194 scsi_cmd->result = (DID_NO_CONNECT << 16);
6195 scsi_cmd->scsi_done(scsi_cmd);
6196 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6197 return 0;
6198 }
6199
6200 /**
6201 * ipr_ioctl - IOCTL handler
6202 * @sdev: scsi device struct
6203 * @cmd: IOCTL cmd
6204 * @arg: IOCTL arg
6205 *
6206 * Return value:
6207 * 0 on success / other on failure
6208 **/
6209 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6210 {
6211 struct ipr_resource_entry *res;
6212
6213 res = (struct ipr_resource_entry *)sdev->hostdata;
6214 if (res && ipr_is_gata(res)) {
6215 if (cmd == HDIO_GET_IDENTITY)
6216 return -ENOTTY;
6217 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6218 }
6219
6220 return -EINVAL;
6221 }
6222
6223 /**
6224 * ipr_ioa_info - Get information about the card/driver
6225 * @host: scsi host struct
6226 *
6227 * Return value:
6228 * pointer to buffer with description string
6229 **/
6230 static const char *ipr_ioa_info(struct Scsi_Host *host)
6231 {
6232 static char buffer[512];
6233 struct ipr_ioa_cfg *ioa_cfg;
6234 unsigned long lock_flags = 0;
6235
6236 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6237
6238 spin_lock_irqsave(host->host_lock, lock_flags);
6239 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6240 spin_unlock_irqrestore(host->host_lock, lock_flags);
6241
6242 return buffer;
6243 }
6244
6245 static struct scsi_host_template driver_template = {
6246 .module = THIS_MODULE,
6247 .name = "IPR",
6248 .info = ipr_ioa_info,
6249 .ioctl = ipr_ioctl,
6250 .queuecommand = ipr_queuecommand,
6251 .eh_abort_handler = ipr_eh_abort,
6252 .eh_device_reset_handler = ipr_eh_dev_reset,
6253 .eh_host_reset_handler = ipr_eh_host_reset,
6254 .slave_alloc = ipr_slave_alloc,
6255 .slave_configure = ipr_slave_configure,
6256 .slave_destroy = ipr_slave_destroy,
6257 .target_alloc = ipr_target_alloc,
6258 .target_destroy = ipr_target_destroy,
6259 .change_queue_depth = ipr_change_queue_depth,
6260 .change_queue_type = ipr_change_queue_type,
6261 .bios_param = ipr_biosparam,
6262 .can_queue = IPR_MAX_COMMANDS,
6263 .this_id = -1,
6264 .sg_tablesize = IPR_MAX_SGLIST,
6265 .max_sectors = IPR_IOA_MAX_SECTORS,
6266 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6267 .use_clustering = ENABLE_CLUSTERING,
6268 .shost_attrs = ipr_ioa_attrs,
6269 .sdev_attrs = ipr_dev_attrs,
6270 .proc_name = IPR_NAME
6271 };
6272
6273 /**
6274 * ipr_ata_phy_reset - libata phy_reset handler
6275 * @ap: ata port to reset
6276 *
6277 **/
6278 static void ipr_ata_phy_reset(struct ata_port *ap)
6279 {
6280 unsigned long flags;
6281 struct ipr_sata_port *sata_port = ap->private_data;
6282 struct ipr_resource_entry *res = sata_port->res;
6283 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6284 int rc;
6285
6286 ENTER;
6287 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6288 while (ioa_cfg->in_reset_reload) {
6289 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6290 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6291 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6292 }
6293
6294 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6295 goto out_unlock;
6296
6297 rc = ipr_device_reset(ioa_cfg, res);
6298
6299 if (rc) {
6300 ap->link.device[0].class = ATA_DEV_NONE;
6301 goto out_unlock;
6302 }
6303
6304 ap->link.device[0].class = res->ata_class;
6305 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6306 ap->link.device[0].class = ATA_DEV_NONE;
6307
6308 out_unlock:
6309 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6310 LEAVE;
6311 }
6312
6313 /**
6314 * ipr_ata_post_internal - Cleanup after an internal command
6315 * @qc: ATA queued command
6316 *
6317 * Return value:
6318 * none
6319 **/
6320 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6321 {
6322 struct ipr_sata_port *sata_port = qc->ap->private_data;
6323 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6324 struct ipr_cmnd *ipr_cmd;
6325 struct ipr_hrr_queue *hrrq;
6326 unsigned long flags;
6327
6328 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6329 while (ioa_cfg->in_reset_reload) {
6330 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6331 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6332 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6333 }
6334
6335 for_each_hrrq(hrrq, ioa_cfg) {
6336 spin_lock(&hrrq->_lock);
6337 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6338 if (ipr_cmd->qc == qc) {
6339 ipr_device_reset(ioa_cfg, sata_port->res);
6340 break;
6341 }
6342 }
6343 spin_unlock(&hrrq->_lock);
6344 }
6345 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6346 }
6347
6348 /**
6349 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6350 * @regs: destination
6351 * @tf: source ATA taskfile
6352 *
6353 * Return value:
6354 * none
6355 **/
6356 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6357 struct ata_taskfile *tf)
6358 {
6359 regs->feature = tf->feature;
6360 regs->nsect = tf->nsect;
6361 regs->lbal = tf->lbal;
6362 regs->lbam = tf->lbam;
6363 regs->lbah = tf->lbah;
6364 regs->device = tf->device;
6365 regs->command = tf->command;
6366 regs->hob_feature = tf->hob_feature;
6367 regs->hob_nsect = tf->hob_nsect;
6368 regs->hob_lbal = tf->hob_lbal;
6369 regs->hob_lbam = tf->hob_lbam;
6370 regs->hob_lbah = tf->hob_lbah;
6371 regs->ctl = tf->ctl;
6372 }
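/*
 * Note: the hob_* ("high order byte") fields hold the upper halves of
 * the 48-bit LBA and sector count. They only carry meaning for LBA48
 * commands, but copying them unconditionally is harmless.
 */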
6373
6374 /**
6375 * ipr_sata_done - done function for SATA commands
6376 * @ipr_cmd: ipr command struct
6377 *
6378 * This function is invoked by the interrupt handler for
6379 * ops generated by the SCSI mid-layer to SATA devices
6380 *
6381 * Return value:
6382 * none
6383 **/
6384 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6385 {
6386 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6387 struct ata_queued_cmd *qc = ipr_cmd->qc;
6388 struct ipr_sata_port *sata_port = qc->ap->private_data;
6389 struct ipr_resource_entry *res = sata_port->res;
6390 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6391
6392 spin_lock(&ipr_cmd->hrrq->_lock);
6393 if (ipr_cmd->ioa_cfg->sis64)
6394 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6395 sizeof(struct ipr_ioasa_gata));
6396 else
6397 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6398 sizeof(struct ipr_ioasa_gata));
6399 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6400
6401 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6402 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6403
6404 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6405 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6406 else
6407 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6408 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6409 spin_unlock(&ipr_cmd->hrrq->_lock);
6410 ata_qc_complete(qc);
6411 }
6412
6413 /**
6414 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6415 * @ipr_cmd: ipr command struct
6416 * @qc: ATA queued command
6417 *
6418 **/
6419 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6420 struct ata_queued_cmd *qc)
6421 {
6422 u32 ioadl_flags = 0;
6423 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6424 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
6425 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6426 int len = qc->nbytes;
6427 struct scatterlist *sg;
6428 unsigned int si;
6429 dma_addr_t dma_addr = ipr_cmd->dma_addr;
6430
6431 if (len == 0)
6432 return;
6433
6434 if (qc->dma_dir == DMA_TO_DEVICE) {
6435 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6436 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6437 } else if (qc->dma_dir == DMA_FROM_DEVICE)
6438 ioadl_flags = IPR_IOADL_FLAGS_READ;
6439
6440 ioarcb->data_transfer_length = cpu_to_be32(len);
6441 ioarcb->ioadl_len =
6442 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6443 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6444 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
6445
6446 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6447 ioadl64->flags = cpu_to_be32(ioadl_flags);
6448 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6449 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6450
6451 last_ioadl64 = ioadl64;
6452 ioadl64++;
6453 }
6454
6455 if (likely(last_ioadl64))
6456 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6457 }
6458
6459 /**
6460 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6461 * @ipr_cmd: ipr command struct
6462 * @qc: ATA queued command
6463 *
6464 **/
6465 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6466 struct ata_queued_cmd *qc)
6467 {
6468 u32 ioadl_flags = 0;
6469 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6470 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6471 struct ipr_ioadl_desc *last_ioadl = NULL;
6472 int len = qc->nbytes;
6473 struct scatterlist *sg;
6474 unsigned int si;
6475
6476 if (len == 0)
6477 return;
6478
6479 if (qc->dma_dir == DMA_TO_DEVICE) {
6480 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6481 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6482 ioarcb->data_transfer_length = cpu_to_be32(len);
6483 ioarcb->ioadl_len =
6484 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6485 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6486 ioadl_flags = IPR_IOADL_FLAGS_READ;
6487 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6488 ioarcb->read_ioadl_len =
6489 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6490 }
6491
6492 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6493 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6494 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6495
6496 last_ioadl = ioadl;
6497 ioadl++;
6498 }
6499
6500 if (likely(last_ioadl))
6501 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6502 }
6503
6504 /**
6505 * ipr_qc_defer - Get a free ipr_cmd
6506 * @qc: queued command
6507 *
6508 * Return value:
6509 * 0 on success / ATA_DEFER_LINK if the command must be deferred
6510 **/
6511 static int ipr_qc_defer(struct ata_queued_cmd *qc)
6512 {
6513 struct ata_port *ap = qc->ap;
6514 struct ipr_sata_port *sata_port = ap->private_data;
6515 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6516 struct ipr_cmnd *ipr_cmd;
6517 struct ipr_hrr_queue *hrrq;
6518 int hrrq_id;
6519
6520 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6521 hrrq = &ioa_cfg->hrrq[hrrq_id];
6522
6523 qc->lldd_task = NULL;
6524 spin_lock(&hrrq->_lock);
6525 if (unlikely(hrrq->ioa_is_dead)) {
6526 spin_unlock(&hrrq->_lock);
6527 return 0;
6528 }
6529
6530 if (unlikely(!hrrq->allow_cmds)) {
6531 spin_unlock(&hrrq->_lock);
6532 return ATA_DEFER_LINK;
6533 }
6534
6535 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6536 if (ipr_cmd == NULL) {
6537 spin_unlock(&hrrq->_lock);
6538 return ATA_DEFER_LINK;
6539 }
6540
6541 qc->lldd_task = ipr_cmd;
6542 spin_unlock(&hrrq->_lock);
6543 return 0;
6544 }
6545
6546 /**
6547 * ipr_qc_issue - Issue a SATA qc to a device
6548 * @qc: queued command
6549 *
6550 * Return value:
6551 * 0 on success / AC_ERR_* on failure
6552 **/
6553 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6554 {
6555 struct ata_port *ap = qc->ap;
6556 struct ipr_sata_port *sata_port = ap->private_data;
6557 struct ipr_resource_entry *res = sata_port->res;
6558 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6559 struct ipr_cmnd *ipr_cmd;
6560 struct ipr_ioarcb *ioarcb;
6561 struct ipr_ioarcb_ata_regs *regs;
6562
6563 if (qc->lldd_task == NULL)
6564 ipr_qc_defer(qc);
6565
6566 ipr_cmd = qc->lldd_task;
6567 if (ipr_cmd == NULL)
6568 return AC_ERR_SYSTEM;
6569
6570 qc->lldd_task = NULL;
6571 spin_lock(&ipr_cmd->hrrq->_lock);
6572 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6573 ipr_cmd->hrrq->ioa_is_dead)) {
6574 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6575 spin_unlock(&ipr_cmd->hrrq->_lock);
6576 return AC_ERR_SYSTEM;
6577 }
6578
6579 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
6580 ioarcb = &ipr_cmd->ioarcb;
6581
6582 if (ioa_cfg->sis64) {
6583 regs = &ipr_cmd->i.ata_ioadl.regs;
6584 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6585 } else
6586 regs = &ioarcb->u.add_data.u.regs;
6587
6588 memset(regs, 0, sizeof(*regs));
6589 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6590
6591 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
6592 ipr_cmd->qc = qc;
6593 ipr_cmd->done = ipr_sata_done;
6594 ipr_cmd->ioarcb.res_handle = res->res_handle;
6595 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6596 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6597 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6598 ipr_cmd->dma_use_sg = qc->n_elem;
6599
6600 if (ioa_cfg->sis64)
6601 ipr_build_ata_ioadl64(ipr_cmd, qc);
6602 else
6603 ipr_build_ata_ioadl(ipr_cmd, qc);
6604
6605 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6606 ipr_copy_sata_tf(regs, &qc->tf);
6607 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6608 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6609
6610 switch (qc->tf.protocol) {
6611 case ATA_PROT_NODATA:
6612 case ATA_PROT_PIO:
6613 break;
6614
6615 case ATA_PROT_DMA:
6616 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6617 break;
6618
6619 case ATAPI_PROT_PIO:
6620 case ATAPI_PROT_NODATA:
6621 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6622 break;
6623
6624 case ATAPI_PROT_DMA:
6625 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6626 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6627 break;
6628
6629 default:
6630 WARN_ON(1);
6631 spin_unlock(&ipr_cmd->hrrq->_lock);
6632 return AC_ERR_INVALID;
6633 }
6634
6635 ipr_send_command(ipr_cmd);
6636 spin_unlock(&ipr_cmd->hrrq->_lock);
6637
6638 return 0;
6639 }
6640
6641 /**
6642 * ipr_qc_fill_rtf - Read result TF
6643 * @qc: ATA queued command
6644 *
6645 * Return value:
6646 * true
6647 **/
6648 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6649 {
6650 struct ipr_sata_port *sata_port = qc->ap->private_data;
6651 struct ipr_ioasa_gata *g = &sata_port->ioasa;
6652 struct ata_taskfile *tf = &qc->result_tf;
6653
6654 tf->feature = g->error;
6655 tf->nsect = g->nsect;
6656 tf->lbal = g->lbal;
6657 tf->lbam = g->lbam;
6658 tf->lbah = g->lbah;
6659 tf->device = g->device;
6660 tf->command = g->status;
6661 tf->hob_nsect = g->hob_nsect;
6662 tf->hob_lbal = g->hob_lbal;
6663 tf->hob_lbam = g->hob_lbam;
6664 tf->hob_lbah = g->hob_lbah;
6665 tf->ctl = g->alt_status;
6666
6667 return true;
6668 }
6669
6670 static struct ata_port_operations ipr_sata_ops = {
6671 .phy_reset = ipr_ata_phy_reset,
6672 .hardreset = ipr_sata_reset,
6673 .post_internal_cmd = ipr_ata_post_internal,
6674 .qc_prep = ata_noop_qc_prep,
6675 .qc_defer = ipr_qc_defer,
6676 .qc_issue = ipr_qc_issue,
6677 .qc_fill_rtf = ipr_qc_fill_rtf,
6678 .port_start = ata_sas_port_start,
6679 .port_stop = ata_sas_port_stop
6680 };
6681
6682 static struct ata_port_info sata_port_info = {
6683 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
6684 .pio_mask = ATA_PIO4_ONLY,
6685 .mwdma_mask = ATA_MWDMA2,
6686 .udma_mask = ATA_UDMA6,
6687 .port_ops = &ipr_sata_ops
6688 };
6689
6690 #ifdef CONFIG_PPC_PSERIES
6691 static const u16 ipr_blocked_processors[] = {
6692 PVR_NORTHSTAR,
6693 PVR_PULSAR,
6694 PVR_POWER4,
6695 PVR_ICESTAR,
6696 PVR_SSTAR,
6697 PVR_POWER4p,
6698 PVR_630,
6699 PVR_630p
6700 };
6701
6702 /**
6703 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6704 * @ioa_cfg: ioa cfg struct
6705 *
6706 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6707 * certain pSeries hardware. This function determines if the given
6708 * adapter is in one of these configurations or not.
6709 *
6710 * Return value:
6711 * 1 if adapter is not supported / 0 if adapter is supported
6712 **/
6713 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6714 {
6715 int i;
6716
6717 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6718 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6719 if (pvr_version_is(ipr_blocked_processors[i]))
6720 return 1;
6721 }
6722 }
6723 return 0;
6724 }
6725 #else
6726 #define ipr_invalid_adapter(ioa_cfg) 0
6727 #endif
6728
6729 /**
6730 * ipr_ioa_bringdown_done - IOA bring down completion.
6731 * @ipr_cmd: ipr command struct
6732 *
6733 * This function processes the completion of an adapter bring down.
6734 * It wakes any reset sleepers.
6735 *
6736 * Return value:
6737 * IPR_RC_JOB_RETURN
6738 **/
6739 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6740 {
6741 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6742
6743 ENTER;
6744 ioa_cfg->in_reset_reload = 0;
6745 ioa_cfg->reset_retries = 0;
6746 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6747 wake_up_all(&ioa_cfg->reset_wait_q);
6748
6749 spin_unlock_irq(ioa_cfg->host->host_lock);
6750 scsi_unblock_requests(ioa_cfg->host);
6751 spin_lock_irq(ioa_cfg->host->host_lock);
6752 LEAVE;
6753
6754 return IPR_RC_JOB_RETURN;
6755 }
6756
6757 /**
6758 * ipr_ioa_reset_done - IOA reset completion.
6759 * @ipr_cmd: ipr command struct
6760 *
6761 * This function processes the completion of an adapter reset.
6762 * It schedules any necessary mid-layer add/removes and
6763 * wakes any reset sleepers.
6764 *
6765 * Return value:
6766 * IPR_RC_JOB_RETURN
6767 **/
6768 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6769 {
6770 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6771 struct ipr_resource_entry *res;
6772 struct ipr_hostrcb *hostrcb, *temp;
6773 int i = 0, j;
6774
6775 ENTER;
6776 ioa_cfg->in_reset_reload = 0;
6777 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
6778 spin_lock(&ioa_cfg->hrrq[j]._lock);
6779 ioa_cfg->hrrq[j].allow_cmds = 1;
6780 spin_unlock(&ioa_cfg->hrrq[j]._lock);
6781 }
6782 wmb();
6783 ioa_cfg->reset_cmd = NULL;
6784 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6785
6786 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6787 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6788 ipr_trace;
6789 break;
6790 }
6791 }
6792 schedule_work(&ioa_cfg->work_q);
6793
6794 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6795 list_del(&hostrcb->queue);
6796 if (i++ < IPR_NUM_LOG_HCAMS)
6797 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6798 else
6799 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6800 }
6801
6802 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6803 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6804
6805 ioa_cfg->reset_retries = 0;
6806 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6807 wake_up_all(&ioa_cfg->reset_wait_q);
6808
6809 spin_unlock(ioa_cfg->host->host_lock);
6810 scsi_unblock_requests(ioa_cfg->host);
6811 spin_lock(ioa_cfg->host->host_lock);
6812
6813 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6814 scsi_block_requests(ioa_cfg->host);
6815
6816 LEAVE;
6817 return IPR_RC_JOB_RETURN;
6818 }
6819
6820 /**
6821 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6822 * @supported_dev: supported device struct
6823 * @vpids: vendor product id struct
6824 *
6825 * Return value:
6826 * none
6827 **/
6828 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6829 struct ipr_std_inq_vpids *vpids)
6830 {
6831 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6832 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6833 supported_dev->num_records = 1;
6834 supported_dev->data_length =
6835 cpu_to_be16(sizeof(struct ipr_supported_device));
6836 supported_dev->reserved = 0;
6837 }
6838
6839 /**
6840 * ipr_set_supported_devs - Send Set Supported Devices for a device
6841 * @ipr_cmd: ipr command struct
6842 *
6843 * This function sends a Set Supported Devices to the adapter
6844 *
6845 * Return value:
6846 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6847 **/
6848 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6849 {
6850 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6851 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6852 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6853 struct ipr_resource_entry *res = ipr_cmd->u.res;
6854
6855 ipr_cmd->job_step = ipr_ioa_reset_done;
6856
6857 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6858 if (!ipr_is_scsi_disk(res))
6859 continue;
6860
6861 ipr_cmd->u.res = res;
6862 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6863
6864 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6865 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6866 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6867
6868 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6869 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
6870 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6871 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6872
6873 ipr_init_ioadl(ipr_cmd,
6874 ioa_cfg->vpd_cbs_dma +
6875 offsetof(struct ipr_misc_cbs, supp_dev),
6876 sizeof(struct ipr_supported_device),
6877 IPR_IOADL_FLAGS_WRITE_LAST);
6878
6879 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6880 IPR_SET_SUP_DEVICE_TIMEOUT);
6881
6882 if (!ioa_cfg->sis64)
6883 ipr_cmd->job_step = ipr_set_supported_devs;
6884 LEAVE;
6885 return IPR_RC_JOB_RETURN;
6886 }
6887
6888 LEAVE;
6889 return IPR_RC_JOB_CONTINUE;
6890 }
6891
6892 /**
6893 * ipr_get_mode_page - Locate specified mode page
6894 * @mode_pages: mode page buffer
6895 * @page_code: page code to find
6896 * @len: minimum required length for mode page
6897 *
6898 * Return value:
6899 * pointer to mode page / NULL on failure
6900 **/
6901 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6902 u32 page_code, u32 len)
6903 {
6904 struct ipr_mode_page_hdr *mode_hdr;
6905 u32 page_length;
6906 u32 length;
6907
6908 if (!mode_pages || (mode_pages->hdr.length == 0))
6909 return NULL;
6910
6911 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6912 mode_hdr = (struct ipr_mode_page_hdr *)
6913 (mode_pages->data + mode_pages->hdr.block_desc_len);
6914
6915 while (length) {
6916 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6917 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6918 return mode_hdr;
6919 break;
6920 } else {
6921 page_length = (sizeof(struct ipr_mode_page_hdr) +
6922 mode_hdr->page_length);
6923 length -= page_length;
6924 mode_hdr = (struct ipr_mode_page_hdr *)
6925 ((unsigned long)mode_hdr + page_length);
6926 }
6927 }
6928 return NULL;
6929 }
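/*
 * Usage sketch (hypothetical caller, mirroring the page 0x28 handlers
 * below): the caller supplies a minimum acceptable page length and
 * should check for NULL, since the IOA may not implement the page.
 */
#if 0
	struct ipr_mode_page28 *page28;

	page28 = ipr_get_mode_page(mode_pages, 0x28,
				   sizeof(struct ipr_mode_page28));
	if (!page28)
		return;	/* page not supported by this IOA */
#endif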
6930
6931 /**
6932 * ipr_check_term_power - Check for term power errors
6933 * @ioa_cfg: ioa config struct
6934 * @mode_pages: IOAFP mode pages buffer
6935 *
6936 * Check the IOAFP's mode page 28 for term power errors
6937 *
6938 * Return value:
6939 * nothing
6940 **/
6941 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6942 struct ipr_mode_pages *mode_pages)
6943 {
6944 int i;
6945 int entry_length;
6946 struct ipr_dev_bus_entry *bus;
6947 struct ipr_mode_page28 *mode_page;
6948
6949 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6950 sizeof(struct ipr_mode_page28));
6951
6952 entry_length = mode_page->entry_length;
6953
6954 bus = mode_page->bus;
6955
6956 for (i = 0; i < mode_page->num_entries; i++) {
6957 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
6958 dev_err(&ioa_cfg->pdev->dev,
6959 "Term power is absent on scsi bus %d\n",
6960 bus->res_addr.bus);
6961 }
6962
6963 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
6964 }
6965 }
6966
6967 /**
6968 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
6969 * @ioa_cfg: ioa config struct
6970 *
6971 * Looks through the config table checking for SES devices. If
6972 * an SES device has an entry in the SES table specifying a maximum
6973 * SCSI bus speed, the speed of that bus is limited accordingly.
6974 *
6975 * Return value:
6976 * none
6977 **/
6978 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
6979 {
6980 u32 max_xfer_rate;
6981 int i;
6982
6983 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6984 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
6985 ioa_cfg->bus_attr[i].bus_width);
6986
6987 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
6988 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
6989 }
6990 }
6991
6992 /**
6993 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
6994 * @ioa_cfg: ioa config struct
6995 * @mode_pages: mode page 28 buffer
6996 *
6997 * Updates mode page 28 based on driver configuration
6998 *
6999 * Return value:
7000 * none
7001 **/
7002 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7003 struct ipr_mode_pages *mode_pages)
7004 {
7005 int i, entry_length;
7006 struct ipr_dev_bus_entry *bus;
7007 struct ipr_bus_attributes *bus_attr;
7008 struct ipr_mode_page28 *mode_page;
7009
7010 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7011 sizeof(struct ipr_mode_page28));
7012
7013 entry_length = mode_page->entry_length;
7014
7015 /* Loop for each device bus entry */
7016 for (i = 0, bus = mode_page->bus;
7017 i < mode_page->num_entries;
7018 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7019 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7020 dev_err(&ioa_cfg->pdev->dev,
7021 "Invalid resource address reported: 0x%08X\n",
7022 IPR_GET_PHYS_LOC(bus->res_addr));
7023 continue;
7024 }
7025
7026 bus_attr = &ioa_cfg->bus_attr[i];
7027 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7028 bus->bus_width = bus_attr->bus_width;
7029 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7030 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7031 if (bus_attr->qas_enabled)
7032 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7033 else
7034 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7035 }
7036 }
7037
7038 /**
7039 * ipr_build_mode_select - Build a mode select command
7040 * @ipr_cmd: ipr command struct
7041 * @res_handle: resource handle to send command to
7042 * @parm: byte 1 of the Mode Select CDB
7043 * @dma_addr: DMA buffer address
7044 * @xfer_len: data transfer length
7045 *
7046 * Return value:
7047 * none
7048 **/
7049 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7050 __be32 res_handle, u8 parm,
7051 dma_addr_t dma_addr, u8 xfer_len)
7052 {
7053 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7054
7055 ioarcb->res_handle = res_handle;
7056 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7057 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7058 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7059 ioarcb->cmd_pkt.cdb[1] = parm;
7060 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7061
7062 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7063 }
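/*
 * Resulting CDB, for reference. The page 24/28 callers below pass
 * parm = 0x11, i.e. PF | SP (page format, save pages):
 *
 *	cdb[0] = MODE_SELECT (0x15)
 *	cdb[1] = parm
 *	cdb[4] = parameter list length (xfer_len)
 */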
7064
7065 /**
7066 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7067 * @ipr_cmd: ipr command struct
7068 *
7069 * This function sets up the SCSI bus attributes and sends
7070 * a Mode Select for Page 28 to activate them.
7071 *
7072 * Return value:
7073 * IPR_RC_JOB_RETURN
7074 **/
7075 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7076 {
7077 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7078 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7079 int length;
7080
7081 ENTER;
7082 ipr_scsi_bus_speed_limit(ioa_cfg);
7083 ipr_check_term_power(ioa_cfg, mode_pages);
7084 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7085 length = mode_pages->hdr.length + 1;
7086 mode_pages->hdr.length = 0;
7087
7088 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7089 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7090 length);
7091
7092 ipr_cmd->job_step = ipr_set_supported_devs;
7093 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7094 struct ipr_resource_entry, queue);
7095 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7096
7097 LEAVE;
7098 return IPR_RC_JOB_RETURN;
7099 }
7100
7101 /**
7102 * ipr_build_mode_sense - Builds a mode sense command
7103 * @ipr_cmd: ipr command struct
7104 * @res_handle: resource handle to send command to
7105 * @parm: byte 2 (page code) of the Mode Sense CDB
7106 * @dma_addr: DMA address of mode sense buffer
7107 * @xfer_len: Size of DMA buffer
7108 *
7109 * Return value:
7110 * none
7111 **/
7112 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7113 __be32 res_handle,
7114 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7115 {
7116 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7117
7118 ioarcb->res_handle = res_handle;
7119 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7120 ioarcb->cmd_pkt.cdb[2] = parm;
7121 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7122 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7123
7124 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7125 }
7126
7127 /**
7128 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7129 * @ipr_cmd: ipr command struct
7130 *
7131 * This function handles the failure of an IOA bringup command.
7132 *
7133 * Return value:
7134 * IPR_RC_JOB_RETURN
7135 **/
7136 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7137 {
7138 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7139 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7140
7141 dev_err(&ioa_cfg->pdev->dev,
7142 "0x%02X failed with IOASC: 0x%08X\n",
7143 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7144
7145 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7146 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7147 return IPR_RC_JOB_RETURN;
7148 }
7149
7150 /**
7151 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7152 * @ipr_cmd: ipr command struct
7153 *
7154 * This function handles the failure of a Mode Sense to the IOAFP.
7155 * Some adapters do not handle all mode pages.
7156 *
7157 * Return value:
7158 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7159 **/
7160 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7161 {
7162 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7163 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7164
7165 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7166 ipr_cmd->job_step = ipr_set_supported_devs;
7167 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7168 struct ipr_resource_entry, queue);
7169 return IPR_RC_JOB_CONTINUE;
7170 }
7171
7172 return ipr_reset_cmd_failed(ipr_cmd);
7173 }
7174
7175 /**
7176 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7177 * @ipr_cmd: ipr command struct
7178 *
7179 * This function sends a Page 28 mode sense to the IOA to
7180 * retrieve SCSI bus attributes.
7181 *
7182 * Return value:
7183 * IPR_RC_JOB_RETURN
7184 **/
7185 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7186 {
7187 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7188
7189 ENTER;
7190 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7191 0x28, ioa_cfg->vpd_cbs_dma +
7192 offsetof(struct ipr_misc_cbs, mode_pages),
7193 sizeof(struct ipr_mode_pages));
7194
7195 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7196 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7197
7198 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7199
7200 LEAVE;
7201 return IPR_RC_JOB_RETURN;
7202 }
7203
7204 /**
7205 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7206 * @ipr_cmd: ipr command struct
7207 *
7208 * This function enables dual IOA RAID support if possible.
7209 *
7210 * Return value:
7211 * IPR_RC_JOB_RETURN
7212 **/
7213 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7214 {
7215 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7216 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7217 struct ipr_mode_page24 *mode_page;
7218 int length;
7219
7220 ENTER;
7221 mode_page = ipr_get_mode_page(mode_pages, 0x24,
7222 sizeof(struct ipr_mode_page24));
7223
7224 if (mode_page)
7225 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7226
7227 length = mode_pages->hdr.length + 1;
7228 mode_pages->hdr.length = 0;
7229
7230 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7231 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7232 length);
7233
7234 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7235 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7236
7237 LEAVE;
7238 return IPR_RC_JOB_RETURN;
7239 }
7240
7241 /**
7242 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7243 * @ipr_cmd: ipr command struct
7244 *
7245 * This function handles the failure of a Mode Sense to the IOAFP.
7246 * Some adapters do not handle all mode pages.
7247 *
7248 * Return value:
7249 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7250 **/
7251 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7252 {
7253 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7254
7255 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7256 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7257 return IPR_RC_JOB_CONTINUE;
7258 }
7259
7260 return ipr_reset_cmd_failed(ipr_cmd);
7261 }
7262
7263 /**
7264 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7265 * @ipr_cmd: ipr command struct
7266 *
7267 * This function sends a mode sense to the IOA to retrieve
7268 * the IOA Advanced Function Control mode page.
7269 *
7270 * Return value:
7271 * IPR_RC_JOB_RETURN
7272 **/
7273 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7274 {
7275 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7276
7277 ENTER;
7278 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7279 0x24, ioa_cfg->vpd_cbs_dma +
7280 offsetof(struct ipr_misc_cbs, mode_pages),
7281 sizeof(struct ipr_mode_pages));
7282
7283 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7284 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7285
7286 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7287
7288 LEAVE;
7289 return IPR_RC_JOB_RETURN;
7290 }
7291
7292 /**
7293 * ipr_init_res_table - Initialize the resource table
7294 * @ipr_cmd: ipr command struct
7295 *
7296 * This function looks through the existing resource table, comparing
7297 * it with the config table. This function will take care of old/new
7298 * devices and schedule adding/removing them from the mid-layer
7299 * as appropriate.
7300 *
7301 * Return value:
7302 * IPR_RC_JOB_CONTINUE
7303 **/
7304 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7305 {
7306 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7307 struct ipr_resource_entry *res, *temp;
7308 struct ipr_config_table_entry_wrapper cfgtew;
7309 int entries, found, flag, i;
7310 LIST_HEAD(old_res);
7311
7312 ENTER;
7313 if (ioa_cfg->sis64)
7314 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7315 else
7316 flag = ioa_cfg->u.cfg_table->hdr.flags;
7317
7318 if (flag & IPR_UCODE_DOWNLOAD_REQ)
7319 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7320
7321 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7322 list_move_tail(&res->queue, &old_res);
7323
7324 if (ioa_cfg->sis64)
7325 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7326 else
7327 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7328
7329 for (i = 0; i < entries; i++) {
7330 if (ioa_cfg->sis64)
7331 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7332 else
7333 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7334 found = 0;
7335
7336 list_for_each_entry_safe(res, temp, &old_res, queue) {
7337 if (ipr_is_same_device(res, &cfgtew)) {
7338 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7339 found = 1;
7340 break;
7341 }
7342 }
7343
7344 if (!found) {
7345 if (list_empty(&ioa_cfg->free_res_q)) {
7346 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7347 break;
7348 }
7349
7350 found = 1;
7351 res = list_entry(ioa_cfg->free_res_q.next,
7352 struct ipr_resource_entry, queue);
7353 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7354 ipr_init_res_entry(res, &cfgtew);
7355 res->add_to_ml = 1;
7356 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7357 res->sdev->allow_restart = 1;
7358
7359 if (found)
7360 ipr_update_res_entry(res, &cfgtew);
7361 }
7362
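/*
 * Anything still on old_res has vanished from the config table.
 * Entries bound to a scsi_device are flagged for removal from the
 * mid-layer; the remainder are recycled onto the free queue.
 */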
7363 list_for_each_entry_safe(res, temp, &old_res, queue) {
7364 if (res->sdev) {
7365 res->del_from_ml = 1;
7366 res->res_handle = IPR_INVALID_RES_HANDLE;
7367 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7368 }
7369 }
7370
7371 list_for_each_entry_safe(res, temp, &old_res, queue) {
7372 ipr_clear_res_target(res);
7373 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7374 }
7375
7376 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7377 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7378 else
7379 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7380
7381 LEAVE;
7382 return IPR_RC_JOB_CONTINUE;
7383 }
7384
7385 /**
7386 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7387 * @ipr_cmd: ipr command struct
7388 *
7389 * This function sends a Query IOA Configuration command
7390 * to the adapter to retrieve the IOA configuration table.
7391 *
7392 * Return value:
7393 * IPR_RC_JOB_RETURN
7394 **/
7395 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7396 {
7397 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7398 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7399 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7400 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7401
7402 ENTER;
7403 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7404 ioa_cfg->dual_raid = 1;
7405 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7406 ucode_vpd->major_release, ucode_vpd->card_type,
7407 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7408 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7409 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7410
7411 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
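/* CDB bytes 6-8 carry the config table allocation length, MSB first */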
7412 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7413 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7414 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7415
7416 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7417 IPR_IOADL_FLAGS_READ_LAST);
7418
7419 ipr_cmd->job_step = ipr_init_res_table;
7420
7421 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7422
7423 LEAVE;
7424 return IPR_RC_JOB_RETURN;
7425 }
7426
7427 /**
7428 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7429 * @ipr_cmd: ipr command struct
 * @flags: inquiry flags byte (CDB byte 1)
 * @page: inquiry page code (CDB byte 2)
 * @dma_addr: DMA address of the inquiry buffer
 * @xfer_len: size of the inquiry buffer
7430 *
7431 * This utility function sends an inquiry to the adapter.
7432 *
7433 * Return value:
7434 * none
7435 **/
7436 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7437 dma_addr_t dma_addr, u8 xfer_len)
7438 {
7439 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7440
7441 ENTER;
7442 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7443 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7444
7445 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7446 ioarcb->cmd_pkt.cdb[1] = flags;
7447 ioarcb->cmd_pkt.cdb[2] = page;
7448 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7449
7450 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7451
7452 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7453 LEAVE;
7454 }
7455
7456 /**
7457 * ipr_inquiry_page_supported - Is the given inquiry page supported
7458 * @page0: inquiry page 0 buffer
7459 * @page: page code.
7460 *
7461 * This function determines if the specified inquiry page is supported.
7462 *
7463 * Return value:
7464 * 1 if page is supported / 0 if not
7465 **/
7466 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7467 {
7468 int i;
7469
7470 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7471 if (page0->page[i] == page)
7472 return 1;
7473
7474 return 0;
7475 }
7476
7477 /**
7478 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7479 * @ipr_cmd: ipr command struct
7480 *
7481 * This function sends a Page 0xD0 inquiry to the adapter
7482 * to retrieve adapter capabilities.
7483 *
7484 * Return value:
7485 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7486 **/
7487 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7488 {
7489 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7490 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7491 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7492
7493 ENTER;
7494 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7495 memset(cap, 0, sizeof(*cap));
7496
7497 if (ipr_inquiry_page_supported(page0, 0xD0)) {
7498 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7499 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7500 sizeof(struct ipr_inquiry_cap));
7501 return IPR_RC_JOB_RETURN;
7502 }
7503
7504 LEAVE;
7505 return IPR_RC_JOB_CONTINUE;
7506 }
7507
7508 /**
7509 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7510 * @ipr_cmd: ipr command struct
7511 *
7512 * This function sends a Page 3 inquiry to the adapter
7513 * to retrieve software VPD information.
7514 *
7515 * Return value:
7516 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7517 **/
7518 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7519 {
7520 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7521
7522 ENTER;
7523
7524 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7525
7526 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7527 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7528 sizeof(struct ipr_inquiry_page3));
7529
7530 LEAVE;
7531 return IPR_RC_JOB_RETURN;
7532 }
7533
7534 /**
7535 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7536 * @ipr_cmd: ipr command struct
7537 *
7538 * This function sends a Page 0 inquiry to the adapter
7539 * to retrieve supported inquiry pages.
7540 *
7541 * Return value:
7542 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7543 **/
7544 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7545 {
7546 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7547 char type[5];
7548
7549 ENTER;
7550
7551 /* Grab the type out of the VPD and store it away */
7552 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7553 type[4] = '\0';
7554 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7555
7556 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
7557
7558 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7559 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7560 sizeof(struct ipr_inquiry_page0));
7561
7562 LEAVE;
7563 return IPR_RC_JOB_RETURN;
7564 }
7565
7566 /**
7567 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7568 * @ipr_cmd: ipr command struct
7569 *
7570 * This function sends a standard inquiry to the adapter.
7571 *
7572 * Return value:
7573 * IPR_RC_JOB_RETURN
7574 **/
7575 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7576 {
7577 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7578
7579 ENTER;
7580 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7581
7582 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7583 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7584 sizeof(struct ipr_ioa_vpd));
7585
7586 LEAVE;
7587 return IPR_RC_JOB_RETURN;
7588 }
7589
7590 /**
7591 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7592 * @ipr_cmd: ipr command struct
7593 *
7594 * This function sends an Identify Host Request Response Queue
7595 * command to establish the HRRQ with the adapter.
7596 *
7597 * Return value:
7598 * IPR_RC_JOB_RETURN
7599 **/
7600 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7601 {
7602 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7603 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7604 struct ipr_hrr_queue *hrrq;
7605
7606 ENTER;
7607 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7608 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7609
7610 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7611 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
7612
7613 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7614 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7615
7616 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7617 if (ioa_cfg->sis64)
7618 ioarcb->cmd_pkt.cdb[1] = 0x1;
7619
7620 if (ioa_cfg->nvectors == 1)
7621 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7622 else
7623 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7624
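/*
 * CDB bytes 2-5 carry the low 32 bits of the host RRQ DMA address,
 * MSB first, and bytes 7-8 the queue size in bytes.  SIS-64 adapters
 * take the high 32 bits of the address in bytes 10-13.  When multiple
 * queues are enabled, bytes 9 and 14 carry the queue index.
 */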
7625 ioarcb->cmd_pkt.cdb[2] =
7626 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7627 ioarcb->cmd_pkt.cdb[3] =
7628 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7629 ioarcb->cmd_pkt.cdb[4] =
7630 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7631 ioarcb->cmd_pkt.cdb[5] =
7632 ((u64) hrrq->host_rrq_dma) & 0xff;
7633 ioarcb->cmd_pkt.cdb[7] =
7634 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7635 ioarcb->cmd_pkt.cdb[8] =
7636 (sizeof(u32) * hrrq->size) & 0xff;
7637
7638 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7639 ioarcb->cmd_pkt.cdb[9] =
7640 ioa_cfg->identify_hrrq_index;
7641
7642 if (ioa_cfg->sis64) {
7643 ioarcb->cmd_pkt.cdb[10] =
7644 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7645 ioarcb->cmd_pkt.cdb[11] =
7646 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7647 ioarcb->cmd_pkt.cdb[12] =
7648 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7649 ioarcb->cmd_pkt.cdb[13] =
7650 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7651 }
7652
7653 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7654 ioarcb->cmd_pkt.cdb[14] =
7655 ioa_cfg->identify_hrrq_index;
7656
7657 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7658 IPR_INTERNAL_TIMEOUT);
7659
7660 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7661 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7662
7663 LEAVE;
7664 return IPR_RC_JOB_RETURN;
7665 }
7666
7667 LEAVE;
7668 return IPR_RC_JOB_CONTINUE;
7669 }
7670
7671 /**
7672 * ipr_reset_timer_done - Adapter reset timer function
7673 * @ipr_cmd: ipr command struct
7674 *
7675 * Description: This function is used in adapter reset processing
7676 * for timing events. If the reset_cmd pointer in the IOA
7677 * config struct no longer points at this command, we are doing
7678 * nested resets and fail_all_ops will take care of freeing the
7679 * command block.
7680 *
7681 * Return value:
7682 * none
7683 **/
7684 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7685 {
7686 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7687 unsigned long lock_flags = 0;
7688
7689 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7690
7691 if (ioa_cfg->reset_cmd == ipr_cmd) {
7692 list_del(&ipr_cmd->queue);
7693 ipr_cmd->done(ipr_cmd);
7694 }
7695
7696 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7697 }
7698
7699 /**
7700 * ipr_reset_start_timer - Start a timer for adapter reset job
7701 * @ipr_cmd: ipr command struct
7702 * @timeout: timeout value
7703 *
7704 * Description: This function is used in adapter reset processing
7705 * for timing events. If the reset_cmd pointer in the IOA
7706 * config struct no longer points at this command, we are doing
7707 * nested resets and fail_all_ops will take care of freeing the
7708 * command block.
7709 *
7710 * Return value:
7711 * none
7712 **/
7713 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7714 unsigned long timeout)
7715 {
7716
7717 ENTER;
7718 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7719 ipr_cmd->done = ipr_reset_ioa_job;
7720
7721 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7722 ipr_cmd->timer.expires = jiffies + timeout;
7723 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7724 add_timer(&ipr_cmd->timer);
7725 }
7726
7727 /**
7728 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7729 * @ioa_cfg: ioa cfg struct
7730 *
7731 * Return value:
7732 * nothing
7733 **/
7734 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7735 {
7736 struct ipr_hrr_queue *hrrq;
7737
7738 for_each_hrrq(hrrq, ioa_cfg) {
7739 spin_lock(&hrrq->_lock);
7740 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7741
7742 /* Initialize Host RRQ pointers */
7743 hrrq->hrrq_start = hrrq->host_rrq;
7744 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7745 hrrq->hrrq_curr = hrrq->hrrq_start;
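/*
 * The toggle bit flips each time the queue wraps, letting the
 * interrupt handler tell fresh response entries from stale ones.
 */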
7746 hrrq->toggle_bit = 1;
7747 spin_unlock(&hrrq->_lock);
7748 }
7749 wmb();
7750
7751 ioa_cfg->identify_hrrq_index = 0;
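/*
 * Round-robin I/O dispatch starts at queue 1 when multiple HRRQs
 * exist, since queue 0 is reserved for internal commands.
 */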
7752 if (ioa_cfg->hrrq_num == 1)
7753 atomic_set(&ioa_cfg->hrrq_index, 0);
7754 else
7755 atomic_set(&ioa_cfg->hrrq_index, 1);
7756
7757 /* Zero out config table */
7758 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7759 }
7760
7761 /**
7762 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7763 * @ipr_cmd: ipr command struct
7764 *
7765 * Return value:
7766 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7767 **/
7768 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7769 {
7770 unsigned long stage, stage_time;
7771 u32 feedback;
7772 volatile u32 int_reg;
7773 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7774 u64 maskval = 0;
7775
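/*
 * The init feedback register packs the current IPL stage and that
 * stage's timeout, in seconds, into a single word.
 */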
7776 feedback = readl(ioa_cfg->regs.init_feedback_reg);
7777 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7778 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7779
7780 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7781
7782 /* sanity check the stage_time value */
7783 if (stage_time == 0)
7784 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7785 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7786 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7787 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7788 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7789
7790 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7791 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7792 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7793 stage_time = ioa_cfg->transop_timeout;
7794 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7795 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7796 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7797 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7798 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7799 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7800 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7801 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7802 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7803 return IPR_RC_JOB_CONTINUE;
7804 }
7805 }
7806
7807 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7808 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7809 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7810 ipr_cmd->done = ipr_reset_ioa_job;
7811 add_timer(&ipr_cmd->timer);
7812
7813 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7814
7815 return IPR_RC_JOB_RETURN;
7816 }
7817
7818 /**
7819 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7820 * @ipr_cmd: ipr command struct
7821 *
7822 * This function reinitializes some control blocks and
7823 * enables destructive diagnostics on the adapter.
7824 *
7825 * Return value:
7826 * IPR_RC_JOB_RETURN
7827 **/
7828 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7829 {
7830 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7831 volatile u32 int_reg;
7832 volatile u64 maskval;
7833 int i;
7834
7835 ENTER;
7836 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7837 ipr_init_ioa_mem(ioa_cfg);
7838
7839 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7840 spin_lock(&ioa_cfg->hrrq[i]._lock);
7841 ioa_cfg->hrrq[i].allow_interrupts = 1;
7842 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7843 }
7844 wmb();
7845 if (ioa_cfg->sis64) {
7846 /* Set the adapter to the correct endian mode. */
7847 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7848 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7849 }
7850
7851 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7852
7853 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7854 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
7855 ioa_cfg->regs.clr_interrupt_mask_reg32);
7856 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7857 return IPR_RC_JOB_CONTINUE;
7858 }
7859
7860 /* Enable destructive diagnostics on IOA */
7861 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7862
7863 if (ioa_cfg->sis64) {
7864 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7865 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7866 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7867 } else
7868 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7869
7870 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7871
7872 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7873
7874 if (ioa_cfg->sis64) {
7875 ipr_cmd->job_step = ipr_reset_next_stage;
7876 return IPR_RC_JOB_CONTINUE;
7877 }
7878
7879 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7880 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7881 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7882 ipr_cmd->done = ipr_reset_ioa_job;
7883 add_timer(&ipr_cmd->timer);
7884 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7885
7886 LEAVE;
7887 return IPR_RC_JOB_RETURN;
7888 }
7889
7890 /**
7891 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7892 * @ipr_cmd: ipr command struct
7893 *
7894 * This function is invoked when an adapter dump has run out
7895 * of processing time.
7896 *
7897 * Return value:
7898 * IPR_RC_JOB_CONTINUE
7899 **/
7900 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7901 {
7902 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7903
7904 if (ioa_cfg->sdt_state == GET_DUMP)
7905 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7906 else if (ioa_cfg->sdt_state == READ_DUMP)
7907 ioa_cfg->sdt_state = ABORT_DUMP;
7908
7909 ioa_cfg->dump_timeout = 1;
7910 ipr_cmd->job_step = ipr_reset_alert;
7911
7912 return IPR_RC_JOB_CONTINUE;
7913 }
7914
7915 /**
7916 * ipr_unit_check_no_data - Log a unit check/no data error log
7917 * @ioa_cfg: ioa config struct
7918 *
7919 * Logs an error indicating the adapter unit checked, but for some
7920 * reason, we were unable to fetch the unit check buffer.
7921 *
7922 * Return value:
7923 * nothing
7924 **/
7925 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7926 {
7927 ioa_cfg->errors_logged++;
7928 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7929 }
7930
7931 /**
7932 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7933 * @ioa_cfg: ioa config struct
7934 *
7935 * Fetches the unit check buffer from the adapter by clocking the data
7936 * through the mailbox register.
7937 *
7938 * Return value:
7939 * nothing
7940 **/
7941 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7942 {
7943 unsigned long mailbox;
7944 struct ipr_hostrcb *hostrcb;
7945 struct ipr_uc_sdt sdt;
7946 int rc, length;
7947 u32 ioasc;
7948
7949 mailbox = readl(ioa_cfg->ioa_mailbox);
7950
7951 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
7952 ipr_unit_check_no_data(ioa_cfg);
7953 return;
7954 }
7955
7956 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
7957 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
7958 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
7959
7960 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
7961 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
7962 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
7963 ipr_unit_check_no_data(ioa_cfg);
7964 return;
7965 }
7966
7967 /* Find length of the first sdt entry (UC buffer) */
7968 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
7969 length = be32_to_cpu(sdt.entry[0].end_token);
7970 else
7971 length = (be32_to_cpu(sdt.entry[0].end_token) -
7972 be32_to_cpu(sdt.entry[0].start_token)) &
7973 IPR_FMT2_MBX_ADDR_MASK;
7974
7975 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
7976 struct ipr_hostrcb, queue);
7977 list_del(&hostrcb->queue);
7978 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
7979
7980 rc = ipr_get_ldump_data_section(ioa_cfg,
7981 be32_to_cpu(sdt.entry[0].start_token),
7982 (__be32 *)&hostrcb->hcam,
7983 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
7984
7985 if (!rc) {
7986 ipr_handle_log_data(ioa_cfg, hostrcb);
7987 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
7988 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
7989 ioa_cfg->sdt_state == GET_DUMP)
7990 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7991 } else
7992 ipr_unit_check_no_data(ioa_cfg);
7993
7994 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
7995 }
7996
7997 /**
7998 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
7999 * @ipr_cmd: ipr command struct
8000 *
8001 * Description: This function retrieves the unit check buffer from the adapter.
8002 *
8003 * Return value:
8004 * IPR_RC_JOB_RETURN
8005 **/
8006 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8007 {
8008 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8009
8010 ENTER;
8011 ioa_cfg->ioa_unit_checked = 0;
8012 ipr_get_unit_check_buffer(ioa_cfg);
8013 ipr_cmd->job_step = ipr_reset_alert;
8014 ipr_reset_start_timer(ipr_cmd, 0);
8015
8016 LEAVE;
8017 return IPR_RC_JOB_RETURN;
8018 }
8019
8020 /**
8021 * ipr_reset_restore_cfg_space - Restore PCI config space.
8022 * @ipr_cmd: ipr command struct
8023 *
8024 * Description: This function restores the saved PCI config space of
8025 * the adapter, fails all outstanding ops back to the callers, and
8026 * fetches the dump/unit check if applicable to this reset.
8027 *
8028 * Return value:
8029 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8030 **/
8031 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8032 {
8033 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8034 u32 int_reg;
8035
8036 ENTER;
8037 ioa_cfg->pdev->state_saved = true;
8038 pci_restore_state(ioa_cfg->pdev);
8039
8040 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8041 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8042 return IPR_RC_JOB_CONTINUE;
8043 }
8044
8045 ipr_fail_all_ops(ioa_cfg);
8046
8047 if (ioa_cfg->sis64) {
8048 /* Set the adapter to the correct endian mode. */
8049 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8050 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8051 }
8052
8053 if (ioa_cfg->ioa_unit_checked) {
8054 if (ioa_cfg->sis64) {
8055 ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8056 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8057 return IPR_RC_JOB_RETURN;
8058 } else {
8059 ioa_cfg->ioa_unit_checked = 0;
8060 ipr_get_unit_check_buffer(ioa_cfg);
8061 ipr_cmd->job_step = ipr_reset_alert;
8062 ipr_reset_start_timer(ipr_cmd, 0);
8063 return IPR_RC_JOB_RETURN;
8064 }
8065 }
8066
8067 if (ioa_cfg->in_ioa_bringdown) {
8068 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8069 } else {
8070 ipr_cmd->job_step = ipr_reset_enable_ioa;
8071
8072 if (GET_DUMP == ioa_cfg->sdt_state) {
8073 ioa_cfg->sdt_state = READ_DUMP;
8074 ioa_cfg->dump_timeout = 0;
8075 if (ioa_cfg->sis64)
8076 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8077 else
8078 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8079 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8080 schedule_work(&ioa_cfg->work_q);
8081 return IPR_RC_JOB_RETURN;
8082 }
8083 }
8084
8085 LEAVE;
8086 return IPR_RC_JOB_CONTINUE;
8087 }
8088
8089 /**
8090 * ipr_reset_bist_done - BIST has completed on the adapter.
8091 * @ipr_cmd: ipr command struct
8092 *
8093 * Description: Unblock config space and resume the reset process.
8094 *
8095 * Return value:
8096 * IPR_RC_JOB_CONTINUE
8097 **/
8098 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8099 {
8100 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8101
8102 ENTER;
8103 if (ioa_cfg->cfg_locked)
8104 pci_cfg_access_unlock(ioa_cfg->pdev);
8105 ioa_cfg->cfg_locked = 0;
8106 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8107 LEAVE;
8108 return IPR_RC_JOB_CONTINUE;
8109 }
8110
8111 /**
8112 * ipr_reset_start_bist - Run BIST on the adapter.
8113 * @ipr_cmd: ipr command struct
8114 *
8115 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8116 *
8117 * Return value:
8118 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8119 **/
8120 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8121 {
8122 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8123 int rc = PCIBIOS_SUCCESSFUL;
8124
8125 ENTER;
8126 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8127 writel(IPR_UPROCI_SIS64_START_BIST,
8128 ioa_cfg->regs.set_uproc_interrupt_reg32);
8129 else
8130 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8131
8132 if (rc == PCIBIOS_SUCCESSFUL) {
8133 ipr_cmd->job_step = ipr_reset_bist_done;
8134 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8135 rc = IPR_RC_JOB_RETURN;
8136 } else {
8137 if (ioa_cfg->cfg_locked)
8138 pci_cfg_access_unlock(ioa_cfg->pdev);
8139 ioa_cfg->cfg_locked = 0;
8140 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8141 rc = IPR_RC_JOB_CONTINUE;
8142 }
8143
8144 LEAVE;
8145 return rc;
8146 }
8147
8148 /**
8149 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8150 * @ipr_cmd: ipr command struct
8151 *
8152 * Description: This clears PCI reset to the adapter and delays two seconds.
8153 *
8154 * Return value:
8155 * IPR_RC_JOB_RETURN
8156 **/
8157 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8158 {
8159 ENTER;
8160 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
8161 ipr_cmd->job_step = ipr_reset_bist_done;
8162 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8163 LEAVE;
8164 return IPR_RC_JOB_RETURN;
8165 }
8166
8167 /**
8168 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8169 * @ipr_cmd: ipr command struct
8170 *
8171 * Description: This asserts PCI reset to the adapter.
8172 *
8173 * Return value:
8174 * IPR_RC_JOB_RETURN
8175 **/
8176 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8177 {
8178 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8179 struct pci_dev *pdev = ioa_cfg->pdev;
8180
8181 ENTER;
8182 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8183 ipr_cmd->job_step = ipr_reset_slot_reset_done;
8184 ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
8185 LEAVE;
8186 return IPR_RC_JOB_RETURN;
8187 }
8188
8189 /**
8190 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8191 * @ipr_cmd: ipr command struct
8192 *
8193 * Description: This attempts to block config access to the IOA.
8194 *
8195 * Return value:
8196 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8197 **/
8198 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8199 {
8200 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8201 int rc = IPR_RC_JOB_CONTINUE;
8202
8203 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8204 ioa_cfg->cfg_locked = 1;
8205 ipr_cmd->job_step = ioa_cfg->reset;
8206 } else {
8207 if (ipr_cmd->u.time_left) {
8208 rc = IPR_RC_JOB_RETURN;
8209 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8210 ipr_reset_start_timer(ipr_cmd,
8211 IPR_CHECK_FOR_RESET_TIMEOUT);
8212 } else {
8213 ipr_cmd->job_step = ioa_cfg->reset;
8214 dev_err(&ioa_cfg->pdev->dev,
8215 "Timed out waiting to lock config access. Resetting anyway.\n");
8216 }
8217 }
8218
8219 return rc;
8220 }
8221
8222 /**
8223 * ipr_reset_block_config_access - Block config access to the IOA
8224 * @ipr_cmd: ipr command struct
8225 *
8226 * Description: This attempts to block config access to the IOA.
8227 *
8228 * Return value:
8229 * IPR_RC_JOB_CONTINUE
8230 **/
8231 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8232 {
8233 ipr_cmd->ioa_cfg->cfg_locked = 0;
8234 ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8235 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8236 return IPR_RC_JOB_CONTINUE;
8237 }
8238
8239 /**
8240 * ipr_reset_allowed - Query whether or not IOA can be reset
8241 * @ioa_cfg: ioa config struct
8242 *
8243 * Return value:
8244 * 0 if reset not allowed / non-zero if reset is allowed
8245 **/
8246 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8247 {
8248 volatile u32 temp_reg;
8249
8250 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8251 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8252 }
8253
8254 /**
8255 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8256 * @ipr_cmd: ipr command struct
8257 *
8258 * Description: This function waits for adapter permission to run BIST,
8259 * then runs BIST. If the adapter does not give permission after a
8260 * reasonable time, we will reset the adapter anyway. The risk of
8261 * resetting the adapter without warning it is the loss of the
8262 * persistent error log on the adapter. If the adapter is
8263 * reset while it is writing to the flash on the adapter, the flash
8264 * segment will have bad ECC and be zeroed.
8265 *
8266 * Return value:
8267 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8268 **/
8269 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8270 {
8271 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8272 int rc = IPR_RC_JOB_RETURN;
8273
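/*
 * Poll the critical operation bit every IPR_CHECK_FOR_RESET_TIMEOUT
 * until the adapter permits a reset or u.time_left is exhausted.
 */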
8274 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8275 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8276 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8277 } else {
8278 ipr_cmd->job_step = ipr_reset_block_config_access;
8279 rc = IPR_RC_JOB_CONTINUE;
8280 }
8281
8282 return rc;
8283 }
8284
8285 /**
8286 * ipr_reset_alert - Alert the adapter of a pending reset
8287 * @ipr_cmd: ipr command struct
8288 *
8289 * Description: This function alerts the adapter that it will be reset.
8290 * If memory space is not currently enabled, proceed directly
8291 * to running BIST on the adapter. The timer must always be started
8292 * so we guarantee we do not run BIST from ipr_isr.
8293 *
8294 * Return value:
8295 * IPR_RC_JOB_RETURN
8296 **/
8297 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8298 {
8299 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8300 u16 cmd_reg;
8301 int rc;
8302
8303 ENTER;
8304 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8305
8306 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8307 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8308 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8309 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8310 } else {
8311 ipr_cmd->job_step = ipr_reset_block_config_access;
8312 }
8313
8314 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8315 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8316
8317 LEAVE;
8318 return IPR_RC_JOB_RETURN;
8319 }
8320
8321 /**
8322 * ipr_reset_ucode_download_done - Microcode download completion
8323 * @ipr_cmd: ipr command struct
8324 *
8325 * Description: This function unmaps the microcode download buffer.
8326 *
8327 * Return value:
8328 * IPR_RC_JOB_CONTINUE
8329 **/
8330 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8331 {
8332 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8333 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8334
8335 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
8336 sglist->num_sg, DMA_TO_DEVICE);
8337
8338 ipr_cmd->job_step = ipr_reset_alert;
8339 return IPR_RC_JOB_CONTINUE;
8340 }
8341
8342 /**
8343 * ipr_reset_ucode_download - Download microcode to the adapter
8344 * @ipr_cmd: ipr command struct
8345 *
8346 * Description: This function checks to see if there is microcode
8347 * to download to the adapter. If there is, a download is performed.
8348 *
8349 * Return value:
8350 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8351 **/
8352 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8353 {
8354 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8355 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8356
8357 ENTER;
8358 ipr_cmd->job_step = ipr_reset_alert;
8359
8360 if (!sglist)
8361 return IPR_RC_JOB_CONTINUE;
8362
8363 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8364 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8365 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8366 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
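/* WRITE BUFFER CDB bytes 6-8: parameter list length, MSB first */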
8367 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8368 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8369 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8370
8371 if (ioa_cfg->sis64)
8372 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8373 else
8374 ipr_build_ucode_ioadl(ipr_cmd, sglist);
8375 ipr_cmd->job_step = ipr_reset_ucode_download_done;
8376
8377 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8378 IPR_WRITE_BUFFER_TIMEOUT);
8379
8380 LEAVE;
8381 return IPR_RC_JOB_RETURN;
8382 }
8383
8384 /**
8385 * ipr_reset_shutdown_ioa - Shutdown the adapter
8386 * @ipr_cmd: ipr command struct
8387 *
8388 * Description: This function issues an adapter shutdown of the
8389 * specified type to the specified adapter as part of the
8390 * adapter reset job.
8391 *
8392 * Return value:
8393 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8394 **/
8395 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8396 {
8397 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8398 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8399 unsigned long timeout;
8400 int rc = IPR_RC_JOB_CONTINUE;
8401
8402 ENTER;
8403 if (shutdown_type != IPR_SHUTDOWN_NONE &&
8404 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8405 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8406 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8407 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8408 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8409
8410 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8411 timeout = IPR_SHUTDOWN_TIMEOUT;
8412 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8413 timeout = IPR_INTERNAL_TIMEOUT;
8414 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8415 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
8416 else
8417 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
8418
8419 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8420
8421 rc = IPR_RC_JOB_RETURN;
8422 ipr_cmd->job_step = ipr_reset_ucode_download;
8423 } else
8424 ipr_cmd->job_step = ipr_reset_alert;
8425
8426 LEAVE;
8427 return rc;
8428 }
8429
8430 /**
8431 * ipr_reset_ioa_job - Adapter reset job
8432 * @ipr_cmd: ipr command struct
8433 *
8434 * Description: This function is the job router for the adapter reset job.
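 * Each job step returns IPR_RC_JOB_CONTINUE to have the next step run
 * immediately in this loop, or IPR_RC_JOB_RETURN once it has started
 * asynchronous work that re-enters this routine via ipr_cmd->done.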
8435 *
8436 * Return value:
8437 * none
8438 **/
8439 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8440 {
8441 u32 rc, ioasc;
8442 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8443
8444 do {
8445 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8446
8447 if (ioa_cfg->reset_cmd != ipr_cmd) {
8448 /*
8449 * We are doing nested adapter resets and this is
8450 * not the current reset job.
8451 */
8452 list_add_tail(&ipr_cmd->queue,
8453 &ipr_cmd->hrrq->hrrq_free_q);
8454 return;
8455 }
8456
8457 if (IPR_IOASC_SENSE_KEY(ioasc)) {
8458 rc = ipr_cmd->job_step_failed(ipr_cmd);
8459 if (rc == IPR_RC_JOB_RETURN)
8460 return;
8461 }
8462
8463 ipr_reinit_ipr_cmnd(ipr_cmd);
8464 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
8465 rc = ipr_cmd->job_step(ipr_cmd);
8466 } while (rc == IPR_RC_JOB_CONTINUE);
8467 }
8468
8469 /**
8470 * _ipr_initiate_ioa_reset - Initiate an adapter reset
8471 * @ioa_cfg: ioa config struct
8472 * @job_step: first job step of reset job
8473 * @shutdown_type: shutdown type
8474 *
8475 * Description: This function will initiate the reset of the given adapter
8476 * starting at the selected job step.
8477 * If the caller needs to wait on the completion of the reset,
8478 * the caller must sleep on the reset_wait_q.
8479 *
8480 * Return value:
8481 * none
8482 **/
8483 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8484 int (*job_step) (struct ipr_cmnd *),
8485 enum ipr_shutdown_type shutdown_type)
8486 {
8487 struct ipr_cmnd *ipr_cmd;
8488 int i;
8489
8490 ioa_cfg->in_reset_reload = 1;
8491 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8492 spin_lock(&ioa_cfg->hrrq[i]._lock);
8493 ioa_cfg->hrrq[i].allow_cmds = 0;
8494 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8495 }
8496 wmb();
8497 scsi_block_requests(ioa_cfg->host);
8498
8499 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8500 ioa_cfg->reset_cmd = ipr_cmd;
8501 ipr_cmd->job_step = job_step;
8502 ipr_cmd->u.shutdown_type = shutdown_type;
8503
8504 ipr_reset_ioa_job(ipr_cmd);
8505 }
8506
8507 /**
8508 * ipr_initiate_ioa_reset - Initiate an adapter reset
8509 * @ioa_cfg: ioa config struct
8510 * @shutdown_type: shutdown type
8511 *
8512 * Description: This function will initiate the reset of the given adapter.
8513 * If the caller needs to wait on the completion of the reset,
8514 * the caller must sleep on the reset_wait_q.
8515 *
8516 * Return value:
8517 * none
8518 **/
8519 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8520 enum ipr_shutdown_type shutdown_type)
8521 {
8522 int i;
8523
8524 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
8525 return;
8526
8527 if (ioa_cfg->in_reset_reload) {
8528 if (ioa_cfg->sdt_state == GET_DUMP)
8529 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8530 else if (ioa_cfg->sdt_state == READ_DUMP)
8531 ioa_cfg->sdt_state = ABORT_DUMP;
8532 }
8533
8534 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8535 dev_err(&ioa_cfg->pdev->dev,
8536 "IOA taken offline - error recovery failed\n");
8537
8538 ioa_cfg->reset_retries = 0;
8539 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8540 spin_lock(&ioa_cfg->hrrq[i]._lock);
8541 ioa_cfg->hrrq[i].ioa_is_dead = 1;
8542 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8543 }
8544 wmb();
8545
8546 if (ioa_cfg->in_ioa_bringdown) {
8547 ioa_cfg->reset_cmd = NULL;
8548 ioa_cfg->in_reset_reload = 0;
8549 ipr_fail_all_ops(ioa_cfg);
8550 wake_up_all(&ioa_cfg->reset_wait_q);
8551
8552 spin_unlock_irq(ioa_cfg->host->host_lock);
8553 scsi_unblock_requests(ioa_cfg->host);
8554 spin_lock_irq(ioa_cfg->host->host_lock);
8555 return;
8556 } else {
8557 ioa_cfg->in_ioa_bringdown = 1;
8558 shutdown_type = IPR_SHUTDOWN_NONE;
8559 }
8560 }
8561
8562 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8563 shutdown_type);
8564 }
8565
8566 /**
8567 * ipr_reset_freeze - Hold off all I/O activity
8568 * @ipr_cmd: ipr command struct
8569 *
8570 * Description: If the PCI slot is frozen, hold off all I/O
8571 * activity; then, as soon as the slot is available again,
8572 * initiate an adapter reset.
8573 */
8574 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8575 {
8576 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8577 int i;
8578
8579 /* Disallow new interrupts, avoid loop */
8580 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8581 spin_lock(&ioa_cfg->hrrq[i]._lock);
8582 ioa_cfg->hrrq[i].allow_interrupts = 0;
8583 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8584 }
8585 wmb();
8586 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8587 ipr_cmd->done = ipr_reset_ioa_job;
8588 return IPR_RC_JOB_RETURN;
8589 }
8590
8591 /**
8592 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8593 * @pdev: PCI device struct
8594 *
8595 * Description: This routine is called to tell us that the PCI bus
8596 * is down. Can't do anything here, except put the device driver
8597 * into a holding pattern, waiting for the PCI bus to come back.
8598 */
8599 static void ipr_pci_frozen(struct pci_dev *pdev)
8600 {
8601 unsigned long flags = 0;
8602 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8603
8604 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8605 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8606 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8607 }
8608
8609 /**
8610 * ipr_pci_slot_reset - Called when PCI slot has been reset.
8611 * @pdev: PCI device struct
8612 *
8613 * Description: This routine is called by the pci error recovery
8614 * code after the PCI slot has been reset, just before we
8615 * should resume normal operations.
8616 */
8617 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8618 {
8619 unsigned long flags = 0;
8620 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8621
8622 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8623 if (ioa_cfg->needs_warm_reset)
8624 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8625 else
8626 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8627 IPR_SHUTDOWN_NONE);
8628 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8629 return PCI_ERS_RESULT_RECOVERED;
8630 }
8631
8632 /**
8633 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8634 * @pdev: PCI device struct
8635 *
8636 * Description: This routine is called when the PCI bus has
8637 * permanently failed.
8638 */
8639 static void ipr_pci_perm_failure(struct pci_dev *pdev)
8640 {
8641 unsigned long flags = 0;
8642 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8643 int i;
8644
8645 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8646 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8647 ioa_cfg->sdt_state = ABORT_DUMP;
8648 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
8649 ioa_cfg->in_ioa_bringdown = 1;
8650 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8651 spin_lock(&ioa_cfg->hrrq[i]._lock);
8652 ioa_cfg->hrrq[i].allow_cmds = 0;
8653 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8654 }
8655 wmb();
8656 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8657 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8658 }
8659
8660 /**
8661 * ipr_pci_error_detected - Called when a PCI error is detected.
8662 * @pdev: PCI device struct
8663 * @state: PCI channel state
8664 *
8665 * Description: Called when a PCI error is detected.
8666 *
8667 * Return value:
8668 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
8669 */
8670 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8671 pci_channel_state_t state)
8672 {
8673 switch (state) {
8674 case pci_channel_io_frozen:
8675 ipr_pci_frozen(pdev);
8676 return PCI_ERS_RESULT_NEED_RESET;
8677 case pci_channel_io_perm_failure:
8678 ipr_pci_perm_failure(pdev);
8679 return PCI_ERS_RESULT_DISCONNECT;
8681 default:
8682 break;
8683 }
8684 return PCI_ERS_RESULT_NEED_RESET;
8685 }
8686
8687 /**
8688 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8689 * @ioa_cfg: ioa cfg struct
8690 *
8691 * Description: This is the second phase of adapter intialization
8692 * This function takes care of initilizing the adapter to the point
8693 * where it can accept new commands.
8694
8695 * Return value:
8696 * 0 on success / -EIO on failure
8697 **/
8698 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
8699 {
8700 int rc = 0;
8701 unsigned long host_lock_flags = 0;
8702
8703 ENTER;
8704 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8705 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
8706 if (ioa_cfg->needs_hard_reset) {
8707 ioa_cfg->needs_hard_reset = 0;
8708 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8709 } else
8710 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8711 IPR_SHUTDOWN_NONE);
8712 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8713 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8714 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8715
8716 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8717 rc = -EIO;
8718 } else if (ipr_invalid_adapter(ioa_cfg)) {
8719 if (!ipr_testmode)
8720 rc = -EIO;
8721
8722 dev_err(&ioa_cfg->pdev->dev,
8723 "Adapter not supported in this hardware configuration.\n");
8724 }
8725
8726 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8727
8728 LEAVE;
8729 return rc;
8730 }
8731
8732 /**
8733 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8734 * @ioa_cfg: ioa config struct
8735 *
8736 * Return value:
8737 * none
8738 **/
8739 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8740 {
8741 int i;
8742
8743 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8744 if (ioa_cfg->ipr_cmnd_list[i])
8745 pci_pool_free(ioa_cfg->ipr_cmd_pool,
8746 ioa_cfg->ipr_cmnd_list[i],
8747 ioa_cfg->ipr_cmnd_list_dma[i]);
8748
8749 ioa_cfg->ipr_cmnd_list[i] = NULL;
8750 }
8751
8752 if (ioa_cfg->ipr_cmd_pool)
8753 pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
8754
8755 kfree(ioa_cfg->ipr_cmnd_list);
8756 kfree(ioa_cfg->ipr_cmnd_list_dma);
8757 ioa_cfg->ipr_cmnd_list = NULL;
8758 ioa_cfg->ipr_cmnd_list_dma = NULL;
8759 ioa_cfg->ipr_cmd_pool = NULL;
8760 }
8761
8762 /**
8763 * ipr_free_mem - Frees memory allocated for an adapter
8764 * @ioa_cfg: ioa cfg struct
8765 *
8766 * Return value:
8767 * nothing
8768 **/
8769 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8770 {
8771 int i;
8772
8773 kfree(ioa_cfg->res_entries);
8774 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
8775 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8776 ipr_free_cmd_blks(ioa_cfg);
8777
8778 for (i = 0; i < ioa_cfg->hrrq_num; i++)
8779 pci_free_consistent(ioa_cfg->pdev,
8780 sizeof(u32) * ioa_cfg->hrrq[i].size,
8781 ioa_cfg->hrrq[i].host_rrq,
8782 ioa_cfg->hrrq[i].host_rrq_dma);
8783
8784 pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
8785 ioa_cfg->u.cfg_table,
8786 ioa_cfg->cfg_table_dma);
8787
8788 for (i = 0; i < IPR_NUM_HCAMS; i++) {
8789 pci_free_consistent(ioa_cfg->pdev,
8790 sizeof(struct ipr_hostrcb),
8791 ioa_cfg->hostrcb[i],
8792 ioa_cfg->hostrcb_dma[i]);
8793 }
8794
8795 ipr_free_dump(ioa_cfg);
8796 kfree(ioa_cfg->trace);
8797 }
8798
8799 /**
8800 * ipr_free_all_resources - Free all allocated resources for an adapter.
8801 * @ioa_cfg: ioa config struct
8802 *
8803 * This function frees all allocated resources for the
8804 * specified adapter.
8805 *
8806 * Return value:
8807 * none
8808 **/
8809 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8810 {
8811 struct pci_dev *pdev = ioa_cfg->pdev;
8812
8813 ENTER;
8814 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
8815 ioa_cfg->intr_flag == IPR_USE_MSIX) {
8816 int i;
8817 for (i = 0; i < ioa_cfg->nvectors; i++)
8818 free_irq(ioa_cfg->vectors_info[i].vec,
8819 &ioa_cfg->hrrq[i]);
8820 } else
8821 free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
8822
8823 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
8824 pci_disable_msi(pdev);
8825 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
8826 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
8827 pci_disable_msix(pdev);
8828 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
8829 }
8830
8831 iounmap(ioa_cfg->hdw_dma_regs);
8832 pci_release_regions(pdev);
8833 ipr_free_mem(ioa_cfg);
8834 scsi_host_put(ioa_cfg->host);
8835 pci_disable_device(pdev);
8836 LEAVE;
8837 }
8838
8839 /**
8840 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8841 * @ioa_cfg: ioa config struct
8842 *
8843 * Return value:
8844 * 0 on success / -ENOMEM on allocation failure
8845 **/
8846 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8847 {
8848 struct ipr_cmnd *ipr_cmd;
8849 struct ipr_ioarcb *ioarcb;
8850 dma_addr_t dma_addr;
8851 int i, entries_each_hrrq, hrrq_id = 0;
8852
8853 ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
8854 sizeof(struct ipr_cmnd), 512, 0);
8855
8856 if (!ioa_cfg->ipr_cmd_pool)
8857 return -ENOMEM;
8858
8859 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
8860 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
8861
8862 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
8863 ipr_free_cmd_blks(ioa_cfg);
8864 return -ENOMEM;
8865 }
8866
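/*
 * When multiple HRRQs are in use, queue 0 is reserved for the
 * driver's internal commands and the remaining command blocks are
 * split evenly across the other queues; any remainder from the
 * division is folded into the last queue below.
 */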
8867 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8868 if (ioa_cfg->hrrq_num > 1) {
8869 if (i == 0) {
8870 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
8871 ioa_cfg->hrrq[i].min_cmd_id = 0;
8872 ioa_cfg->hrrq[i].max_cmd_id =
8873 (entries_each_hrrq - 1);
8874 } else {
8875 entries_each_hrrq =
8876 IPR_NUM_BASE_CMD_BLKS/
8877 (ioa_cfg->hrrq_num - 1);
8878 ioa_cfg->hrrq[i].min_cmd_id =
8879 IPR_NUM_INTERNAL_CMD_BLKS +
8880 (i - 1) * entries_each_hrrq;
8881 ioa_cfg->hrrq[i].max_cmd_id =
8882 (IPR_NUM_INTERNAL_CMD_BLKS +
8883 i * entries_each_hrrq - 1);
8884 }
8885 } else {
8886 entries_each_hrrq = IPR_NUM_CMD_BLKS;
8887 ioa_cfg->hrrq[i].min_cmd_id = 0;
8888 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
8889 }
8890 ioa_cfg->hrrq[i].size = entries_each_hrrq;
8891 }
8892
8893 BUG_ON(ioa_cfg->hrrq_num == 0);
8894
8895 i = IPR_NUM_CMD_BLKS -
8896 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
8897 if (i > 0) {
8898 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
8899 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
8900 }
8901
8902 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8903 ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
8904
8905 if (!ipr_cmd) {
8906 ipr_free_cmd_blks(ioa_cfg);
8907 return -ENOMEM;
8908 }
8909
8910 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
8911 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
8912 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8913
8914 ioarcb = &ipr_cmd->ioarcb;
8915 ipr_cmd->dma_addr = dma_addr;
8916 if (ioa_cfg->sis64)
8917 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
8918 else
8919 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
8920
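/*
 * The low two bits of the response handle are left clear; response
 * queue entries use them for flags such as the toggle bit.
 */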
8921 ioarcb->host_response_handle = cpu_to_be32(i << 2);
8922 if (ioa_cfg->sis64) {
8923 ioarcb->u.sis64_addr_data.data_ioadl_addr =
8924 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
8925 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
8926 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
8927 } else {
8928 ioarcb->write_ioadl_addr =
8929 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
8930 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
8931 ioarcb->ioasa_host_pci_addr =
8932 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
8933 }
8934 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
8935 ipr_cmd->cmd_index = i;
8936 ipr_cmd->ioa_cfg = ioa_cfg;
8937 ipr_cmd->sense_buffer_dma = dma_addr +
8938 offsetof(struct ipr_cmnd, sense_buffer);
8939
8940 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
8941 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
8942 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
8943 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
8944 hrrq_id++;
8945 }
8946
8947 return 0;
8948 }
8949
8950 /**
8951 * ipr_alloc_mem - Allocate memory for an adapter
8952 * @ioa_cfg: ioa config struct
8953 *
8954 * Return value:
8955 * 0 on success / non-zero for error
8956 **/
8957 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
8958 {
8959 struct pci_dev *pdev = ioa_cfg->pdev;
8960 int i, rc = -ENOMEM;
8961
8962 ENTER;
8963 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
8964 ioa_cfg->max_devs_supported, GFP_KERNEL);
8965
8966 if (!ioa_cfg->res_entries)
8967 goto out;
8968
8969 if (ioa_cfg->sis64) {
8970 ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
8971 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8972 ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
8973 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8974 ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
8975 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8976
8977 if (!ioa_cfg->target_ids || !ioa_cfg->array_ids
8978 || !ioa_cfg->vset_ids)
8979 goto out_free_res_entries;
8980 }
8981
8982 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
8983 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
8984 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
8985 }
8986
8987 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
8988 sizeof(struct ipr_misc_cbs),
8989 &ioa_cfg->vpd_cbs_dma);
8990
8991 if (!ioa_cfg->vpd_cbs)
8992 goto out_free_res_entries;
8993
8994 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8995 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
8996 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
8997 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
8998 if (i == 0)
8999 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9000 else
9001 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9002 }
9003
9004 if (ipr_alloc_cmd_blks(ioa_cfg))
9005 goto out_free_vpd_cbs;
9006
9007 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9008 ioa_cfg->hrrq[i].host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
9009 sizeof(u32) * ioa_cfg->hrrq[i].size,
9010 &ioa_cfg->hrrq[i].host_rrq_dma);
9011
9012 if (!ioa_cfg->hrrq[i].host_rrq) {
9013 while (--i >= 0)
9014 pci_free_consistent(pdev,
9015 sizeof(u32) * ioa_cfg->hrrq[i].size,
9016 ioa_cfg->hrrq[i].host_rrq,
9017 ioa_cfg->hrrq[i].host_rrq_dma);
9018 goto out_ipr_free_cmd_blocks;
9019 }
9020 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9021 }
9022
9023 ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
9024 ioa_cfg->cfg_table_size,
9025 &ioa_cfg->cfg_table_dma);
9026
9027 if (!ioa_cfg->u.cfg_table)
9028 goto out_free_host_rrq;
9029
9030 for (i = 0; i < IPR_NUM_HCAMS; i++) {
9031 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
9032 sizeof(struct ipr_hostrcb),
9033 &ioa_cfg->hostrcb_dma[i]);
9034
9035 if (!ioa_cfg->hostrcb[i])
9036 goto out_free_hostrcb_dma;
9037
9038 ioa_cfg->hostrcb[i]->hostrcb_dma =
9039 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9040 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9041 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9042 }
9043
9044 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
9045 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9046
9047 if (!ioa_cfg->trace)
9048 goto out_free_hostrcb_dma;
9049
9050 rc = 0;
9051 out:
9052 LEAVE;
9053 return rc;
9054
9055 out_free_hostrcb_dma:
9056 while (i-- > 0) {
9057 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
9058 ioa_cfg->hostrcb[i],
9059 ioa_cfg->hostrcb_dma[i]);
9060 }
9061 pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
9062 ioa_cfg->u.cfg_table,
9063 ioa_cfg->cfg_table_dma);
9064 out_free_host_rrq:
9065 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9066 pci_free_consistent(pdev,
9067 sizeof(u32) * ioa_cfg->hrrq[i].size,
9068 ioa_cfg->hrrq[i].host_rrq,
9069 ioa_cfg->hrrq[i].host_rrq_dma);
9070 }
9071 out_ipr_free_cmd_blocks:
9072 ipr_free_cmd_blks(ioa_cfg);
9073 out_free_vpd_cbs:
9074 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
9075 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9076 out_free_res_entries:
9077 kfree(ioa_cfg->res_entries);
9078 kfree(ioa_cfg->target_ids);
9079 kfree(ioa_cfg->array_ids);
9080 kfree(ioa_cfg->vset_ids);
9081 goto out;
9082 }
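 /*
  * Note: the error labels above unwind in exact reverse order of
  * allocation, the standard kernel goto-cleanup idiom; each failure point
  * jumps to the label that frees only what was already set up.
  */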
9083
9084 /**
9085 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9086 * @ioa_cfg: ioa config struct
9087 *
9088 * Return value:
9089 * none
9090 **/
9091 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9092 {
9093 int i;
9094
9095 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9096 ioa_cfg->bus_attr[i].bus = i;
9097 ioa_cfg->bus_attr[i].qas_enabled = 0;
9098 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9099 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9100 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9101 else
9102 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9103 }
9104 }
9105
9106 /**
9107 * ipr_init_ioa_cfg - Initialize IOA config struct
9108 * @ioa_cfg: ioa config struct
9109 * @host: scsi host struct
9110 * @pdev: PCI dev struct
9111 *
9112 * Return value:
9113 * none
9114 **/
9115 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9116 struct Scsi_Host *host, struct pci_dev *pdev)
9117 {
9118 const struct ipr_interrupt_offsets *p;
9119 struct ipr_interrupts *t;
9120 void __iomem *base;
9121
9122 ioa_cfg->host = host;
9123 ioa_cfg->pdev = pdev;
9124 ioa_cfg->log_level = ipr_log_level;
9125 ioa_cfg->doorbell = IPR_DOORBELL;
9126 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9127 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9128 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9129 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9130 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9131 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9132
9133 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9134 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9135 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9136 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9137 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9138 init_waitqueue_head(&ioa_cfg->reset_wait_q);
9139 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9140 ioa_cfg->sdt_state = INACTIVE;
9141
9142 ipr_initialize_bus_attr(ioa_cfg);
9143 ioa_cfg->max_devs_supported = ipr_max_devs;
9144
9145 if (ioa_cfg->sis64) {
9146 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9147 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9148 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9149 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9150 } else {
9151 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9152 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9153 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9154 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9155 }
9156 host->max_channel = IPR_MAX_BUS_TO_SCAN;
9157 host->unique_id = host->host_no;
9158 host->max_cmd_len = IPR_MAX_CDB_LEN;
9159 host->can_queue = ioa_cfg->max_cmds;
9160 pci_set_drvdata(pdev, ioa_cfg);
9161
9162 p = &ioa_cfg->chip_cfg->regs;
9163 t = &ioa_cfg->regs;
9164 base = ioa_cfg->hdw_dma_regs;
9165
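 	/* Translate the chip's register offset table (ipr_chip_cfg) into
 	 * ioremapped addresses by adding each offset to the BAR 0 base. */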
9166 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9167 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9168 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9169 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9170 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9171 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9172 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9173 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9174 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9175 t->ioarrin_reg = base + p->ioarrin_reg;
9176 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9177 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9178 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9179 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9180 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9181 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9182
9183 if (ioa_cfg->sis64) {
9184 t->init_feedback_reg = base + p->init_feedback_reg;
9185 t->dump_addr_reg = base + p->dump_addr_reg;
9186 t->dump_data_reg = base + p->dump_data_reg;
9187 t->endian_swap_reg = base + p->endian_swap_reg;
9188 }
9189 }
9190
9191 /**
9192 * ipr_get_chip_info - Find adapter chip information
9193 * @dev_id: PCI device id struct
9194 *
9195 * Return value:
9196 * ptr to chip information on success / NULL on failure
9197 **/
9198 static const struct ipr_chip_t *
9199 ipr_get_chip_info(const struct pci_device_id *dev_id)
9200 {
9201 int i;
9202
9203 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9204 if (ipr_chip[i].vendor == dev_id->vendor &&
9205 ipr_chip[i].device == dev_id->device)
9206 return &ipr_chip[i];
9207 return NULL;
9208 }
9209
9210 static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9211 {
9212 struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
9213 int i, err, vectors;
9214
9215 for (i = 0; i < ARRAY_SIZE(entries); ++i)
9216 entries[i].entry = i;
9217
9218 vectors = ipr_number_of_msix;
9219
9220 while ((err = pci_enable_msix(ioa_cfg->pdev, entries, vectors)) > 0)
9221 vectors = err;
9222
9223 if (err < 0) {
9224 pci_disable_msix(ioa_cfg->pdev);
9225 return err;
9226 }
9227
9228 if (!err) {
9229 for (i = 0; i < vectors; i++)
9230 ioa_cfg->vectors_info[i].vec = entries[i].vector;
9231 ioa_cfg->nvectors = vectors;
9232 }
9233
9234 return err;
9235 }
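 /*
  * Note: the degrade-and-retry loops in ipr_enable_msix() above and
  * ipr_enable_msi() below are what later kernels wrap up as
  * pci_enable_msix_range() and pci_enable_msi_range(); a sketch of the
  * equivalent call on such kernels (assumption: kernel >= 3.14):
  *
  *	vectors = pci_enable_msix_range(ioa_cfg->pdev, entries,
  *					1, ipr_number_of_msix);
  *	if (vectors < 0)
  *		return vectors;
  */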
9236
9237 static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9238 {
9239 int i, err, vectors;
9240
9241 vectors = ipr_number_of_msix;
9242
9243 while ((err = pci_enable_msi_block(ioa_cfg->pdev, vectors)) > 0)
9244 vectors = err;
9245
9246 if (err < 0) {
9247 pci_disable_msi(ioa_cfg->pdev);
9248 return err;
9249 }
9250
9251 if (!err) {
9252 for (i = 0; i < vectors; i++)
9253 ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9254 ioa_cfg->nvectors = vectors;
9255 }
9256
9257 return err;
9258 }
9259
9260 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9261 {
9262 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9263
9264 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9265 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9266 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9267 ioa_cfg->vectors_info[vec_idx].
9268 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9269 }
9270 }
9271
9272 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
9273 {
9274 int i, rc;
9275
9276 for (i = 1; i < ioa_cfg->nvectors; i++) {
9277 rc = request_irq(ioa_cfg->vectors_info[i].vec,
9278 ipr_isr_mhrrq,
9279 0,
9280 ioa_cfg->vectors_info[i].desc,
9281 &ioa_cfg->hrrq[i]);
9282 if (rc) {
9283 while (--i >= 0)
9284 free_irq(ioa_cfg->vectors_info[i].vec,
9285 &ioa_cfg->hrrq[i]);
9286 return rc;
9287 }
9288 }
9289 return 0;
9290 }
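 /* Note: vector 0 is requested by ipr_probe_ioa() before this helper runs;
  * the "--i >= 0" unwind above counts down through index 0, so a failure
  * here leaves no MSI vectors requested at all. */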
9291
9292 /**
9293 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9294 * @irq: interrupt number
 * @devp: pointer to ioa config struct
9295 *
9296 * Description: Simply set the msi_received flag to 1 indicating that
9297 * Message Signaled Interrupts are supported.
9298 *
9299 * Return value:
9300 * IRQ_HANDLED
9301 **/
9302 static irqreturn_t ipr_test_intr(int irq, void *devp)
9303 {
9304 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9305 unsigned long lock_flags = 0;
9306 irqreturn_t rc = IRQ_HANDLED;
9307
9308 dev_info(&ioa_cfg->pdev->dev, "Received IRQ: %d\n", irq);
9309 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9310
9311 ioa_cfg->msi_received = 1;
9312 wake_up(&ioa_cfg->msi_wait_q);
9313
9314 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9315 return rc;
9316 }
9317
9318 /**
9319 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9320 * @ioa_cfg: ioa config struct
 * @pdev: PCI device struct
9321 *
9322 * Description: The return value from pci_enable_msi() cannot always be
9323 * trusted. This routine sets up and initiates a test interrupt to determine
9324 * whether the interrupt is received via the ipr_test_intr() service routine.
9325 * If the test fails, the driver will fall back to LSI.
9326 *
9327 * Return value:
9328 * 0 on success / non-zero on failure
9329 **/
9330 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
9331 {
9332 int rc;
9333 volatile u32 int_reg;
9334 unsigned long lock_flags = 0;
9335
9336 ENTER;
9337
9338 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9339 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9340 ioa_cfg->msi_received = 0;
9341 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9342 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
9343 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9344 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9345
9346 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9347 if (rc) {
9348 dev_err(&pdev->dev, "Cannot assign irq %d\n", pdev->irq);
9349 return rc;
9350 } else if (ipr_debug)
9351 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
9352
9353 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
9354 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
9355 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
9356 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9357 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9358
9359 if (!ioa_cfg->msi_received) {
9360 /* MSI test failed */
9361 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
9362 rc = -EOPNOTSUPP;
9363 } else if (ipr_debug)
9364 dev_info(&pdev->dev, "MSI test succeeded.\n");
9365
9366 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9367
9368 free_irq(pdev->irq, ioa_cfg);
9369
9370 LEAVE;
9371
9372 return rc;
9373 }
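 /*
  * The test above is the common flag-plus-waitqueue handshake: the ISR sets
  * msi_received under the host lock and calls wake_up(), while this routine
  * sleeps in wait_event_timeout(). The bare pattern, with illustrative
  * names only:
  *
  *	// interrupt handler:
  *	done = 1;
  *	wake_up(&wq);
  *
  *	// process context, giving up after one second:
  *	wait_event_timeout(wq, done, HZ);
  */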
9374
9375 /**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
9376 * @pdev: PCI device struct
9377 * @dev_id: PCI device id struct
9378 *
9379 * Return value:
9380 * 0 on success / non-zero on failure
9381 **/
9382 static int ipr_probe_ioa(struct pci_dev *pdev,
9383 const struct pci_device_id *dev_id)
9384 {
9385 struct ipr_ioa_cfg *ioa_cfg;
9386 struct Scsi_Host *host;
9387 unsigned long ipr_regs_pci;
9388 void __iomem *ipr_regs;
9389 int rc = PCIBIOS_SUCCESSFUL;
9390 volatile u32 mask, uproc, interrupts;
9391 unsigned long lock_flags;
9392
9393 ENTER;
9394
9395 if ((rc = pci_enable_device(pdev))) {
9396 dev_err(&pdev->dev, "Cannot enable adapter\n");
9397 goto out;
9398 }
9399
9400 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
9401
9402 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9403
9404 if (!host) {
9405 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9406 rc = -ENOMEM;
9407 goto out_disable;
9408 }
9409
9410 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9411 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
9412 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
9413
9414 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
9415
9416 if (!ioa_cfg->ipr_chip) {
9417 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
9418 dev_id->vendor, dev_id->device);
9419 goto out_scsi_host_put;
9420 }
9421
9422 /* set SIS 32 or SIS 64 */
9423 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
9424 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
9425 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
9426 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
9427
9428 if (ipr_transop_timeout)
9429 ioa_cfg->transop_timeout = ipr_transop_timeout;
9430 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
9431 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
9432 else
9433 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
9434
9435 ioa_cfg->revid = pdev->revision;
9436
9437 ipr_regs_pci = pci_resource_start(pdev, 0);
9438
9439 rc = pci_request_regions(pdev, IPR_NAME);
9440 if (rc < 0) {
9441 dev_err(&pdev->dev,
9442 "Couldn't register memory range of registers\n");
9443 goto out_scsi_host_put;
9444 }
9445
9446 ipr_regs = pci_ioremap_bar(pdev, 0);
9447
9448 if (!ipr_regs) {
9449 dev_err(&pdev->dev,
9450 "Couldn't map memory range of registers\n");
9451 rc = -ENOMEM;
9452 goto out_release_regions;
9453 }
9454
9455 ioa_cfg->hdw_dma_regs = ipr_regs;
9456 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9457 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9458
9459 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
9460
9461 pci_set_master(pdev);
9462
9463 if (ioa_cfg->sis64) {
9464 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
9465 if (rc < 0) {
9466 dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
9467 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9468 }
9469
9470 } else
9471 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9472
9473 if (rc < 0) {
9474 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
9475 goto cleanup_nomem;
9476 }
9477
9478 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
9479 ioa_cfg->chip_cfg->cache_line_size);
9480
9481 if (rc != PCIBIOS_SUCCESSFUL) {
9482 dev_err(&pdev->dev, "Write of cache line size failed\n");
9483 rc = -EIO;
9484 goto cleanup_nomem;
9485 }
9486
9487 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
9488 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
9489 IPR_MAX_MSIX_VECTORS);
9490 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
9491 }
9492
9493 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9494 ipr_enable_msix(ioa_cfg) == 0)
9495 ioa_cfg->intr_flag = IPR_USE_MSIX;
9496 else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9497 ipr_enable_msi(ioa_cfg) == 0)
9498 ioa_cfg->intr_flag = IPR_USE_MSI;
9499 else {
9500 ioa_cfg->intr_flag = IPR_USE_LSI;
9501 ioa_cfg->nvectors = 1;
9502 dev_info(&pdev->dev, "Cannot enable MSI.\n");
9503 }
9504
9505 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9506 ioa_cfg->intr_flag == IPR_USE_MSIX) {
9507 rc = ipr_test_msi(ioa_cfg, pdev);
9508 if (rc == -EOPNOTSUPP) {
9509 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9510 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9511 pci_disable_msi(pdev);
9512 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9513 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9514 pci_disable_msix(pdev);
9515 }
9516
9517 ioa_cfg->intr_flag = IPR_USE_LSI;
9518 ioa_cfg->nvectors = 1;
9519 }
9520 else if (rc)
9521 goto out_msi_disable;
9522 else {
9523 if (ioa_cfg->intr_flag == IPR_USE_MSI)
9524 dev_info(&pdev->dev,
9525 "Request for %d MSIs succeeded with starting IRQ: %d\n",
9526 ioa_cfg->nvectors, pdev->irq);
9527 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9528 dev_info(&pdev->dev,
9529 "Request for %d MSIXs succeeded.",
9530 ioa_cfg->nvectors);
9531 }
9532 }
9533
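 	/* Use one HRRQ per enabled interrupt vector, capped by the number
 	 * of online CPUs and the adapter limit IPR_MAX_HRRQ_NUM. */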
9534 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
9535 (unsigned int)num_online_cpus(),
9536 (unsigned int)IPR_MAX_HRRQ_NUM);
9537
9538 /* Save away PCI config space for use following IOA reset */
9539 rc = pci_save_state(pdev);
9540
9541 if (rc != PCIBIOS_SUCCESSFUL) {
9542 dev_err(&pdev->dev, "Failed to save PCI config space\n");
9543 rc = -EIO;
9544 goto out_msi_disable;
9545 }
9546
9547 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
9548 goto out_msi_disable;
9549
9550 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
9551 goto out_msi_disable;
9552
9553 if (ioa_cfg->sis64)
9554 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9555 + ((sizeof(struct ipr_config_table_entry64)
9556 * ioa_cfg->max_devs_supported)));
9557 else
9558 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9559 + ((sizeof(struct ipr_config_table_entry)
9560 * ioa_cfg->max_devs_supported)));
9561
9562 rc = ipr_alloc_mem(ioa_cfg);
9563 if (rc < 0) {
9564 dev_err(&pdev->dev,
9565 "Couldn't allocate enough memory for device driver!\n");
9566 goto out_msi_disable;
9567 }
9568
9569 /*
9570 * If HRRQ updated interrupt is not masked, or reset alert is set,
9571 * the card is in an unknown state and needs a hard reset
9572 */
9573 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
9574 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
9575 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
9576 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
9577 ioa_cfg->needs_hard_reset = 1;
9578 if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
9579 ioa_cfg->needs_hard_reset = 1;
9580 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
9581 ioa_cfg->ioa_unit_checked = 1;
9582
9583 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9584 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9585 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9586
9587 if (ioa_cfg->intr_flag == IPR_USE_MSI
9588 || ioa_cfg->intr_flag == IPR_USE_MSIX) {
9589 name_msi_vectors(ioa_cfg);
9590 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
9591 0,
9592 ioa_cfg->vectors_info[0].desc,
9593 &ioa_cfg->hrrq[0]);
9594 if (!rc)
9595 rc = ipr_request_other_msi_irqs(ioa_cfg);
9596 } else {
9597 rc = request_irq(pdev->irq, ipr_isr,
9598 IRQF_SHARED,
9599 IPR_NAME, &ioa_cfg->hrrq[0]);
9600 }
9601 if (rc) {
9602 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
9603 pdev->irq, rc);
9604 goto cleanup_nolog;
9605 }
9606
9607 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
9608 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
9609 ioa_cfg->needs_warm_reset = 1;
9610 ioa_cfg->reset = ipr_reset_slot_reset;
9611 } else
9612 ioa_cfg->reset = ipr_reset_start_bist;
9613
9614 spin_lock(&ipr_driver_lock);
9615 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
9616 spin_unlock(&ipr_driver_lock);
9617
9618 LEAVE;
9619 out:
9620 return rc;
9621
9622 cleanup_nolog:
9623 ipr_free_mem(ioa_cfg);
9624 out_msi_disable:
9625 if (ioa_cfg->intr_flag == IPR_USE_MSI)
9626 pci_disable_msi(pdev);
9627 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9628 pci_disable_msix(pdev);
9629 cleanup_nomem:
9630 iounmap(ipr_regs);
9631 out_release_regions:
9632 pci_release_regions(pdev);
9633 out_scsi_host_put:
9634 scsi_host_put(host);
9635 out_disable:
9636 pci_disable_device(pdev);
9637 goto out;
9638 }
9639
9640 /**
9641 * ipr_scan_vsets - Scans for VSET devices
9642 * @ioa_cfg: ioa config struct
9643 *
9644 * Description: Since VSET resources do not follow SAM (sparse LUNs may
9645 * exist with no LUN 0), we have to scan for these ourselves.
9646 *
9647 * Return value:
9648 * none
9649 **/
9650 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
9651 {
9652 int target, lun;
9653
9654 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
9655 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
9656 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
9657 }
9658
9659 /**
9660 * ipr_initiate_ioa_bringdown - Bring down an adapter
9661 * @ioa_cfg: ioa config struct
9662 * @shutdown_type: shutdown type
9663 *
9664 * Description: This function will initiate bringing down the adapter.
9665 * This consists of issuing an IOA shutdown to the adapter
9666 * to flush the cache, and running BIST.
9667 * If the caller needs to wait on the completion of the reset,
9668 * the caller must sleep on the reset_wait_q.
9669 *
9670 * Return value:
9671 * none
9672 **/
9673 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
9674 enum ipr_shutdown_type shutdown_type)
9675 {
9676 ENTER;
9677 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9678 ioa_cfg->sdt_state = ABORT_DUMP;
9679 ioa_cfg->reset_retries = 0;
9680 ioa_cfg->in_ioa_bringdown = 1;
9681 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
9682 LEAVE;
9683 }
9684
9685 /**
9686 * __ipr_remove - Remove a single adapter
9687 * @pdev: pci device struct
9688 *
9689 * Adapter hot plug remove entry point.
9690 *
9691 * Return value:
9692 * none
9693 **/
9694 static void __ipr_remove(struct pci_dev *pdev)
9695 {
9696 unsigned long host_lock_flags = 0;
9697 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9698 ENTER;
9699
9700 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9701 while (ioa_cfg->in_reset_reload) {
9702 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9703 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9704 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9705 }
9706
9707 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9708
9709 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9710 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9711 flush_work(&ioa_cfg->work_q);
9712 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9713
9714 spin_lock(&ipr_driver_lock);
9715 list_del(&ioa_cfg->queue);
9716 spin_unlock(&ipr_driver_lock);
9717
9718 if (ioa_cfg->sdt_state == ABORT_DUMP)
9719 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9720 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9721
9722 ipr_free_all_resources(ioa_cfg);
9723
9724 LEAVE;
9725 }
9726
9727 /**
9728 * ipr_remove - IOA hot plug remove entry point
9729 * @pdev: pci device struct
9730 *
9731 * Adapter hot plug remove entry point.
9732 *
9733 * Return value:
9734 * none
9735 **/
9736 static void ipr_remove(struct pci_dev *pdev)
9737 {
9738 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9739
9740 ENTER;
9741
9742 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9743 &ipr_trace_attr);
9744 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
9745 &ipr_dump_attr);
9746 scsi_remove_host(ioa_cfg->host);
9747
9748 __ipr_remove(pdev);
9749
9750 LEAVE;
9751 }
9752
9753 /**
9754 * ipr_probe - Adapter hot plug add entry point
9755 * @pdev: PCI device struct
 * @dev_id: PCI device id struct
 *
9756 * Return value:
9757 * 0 on success / non-zero on failure
9758 **/
9759 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
9760 {
9761 struct ipr_ioa_cfg *ioa_cfg;
9762 int rc, i;
9763
9764 rc = ipr_probe_ioa(pdev, dev_id);
9765
9766 if (rc)
9767 return rc;
9768
9769 ioa_cfg = pci_get_drvdata(pdev);
9770 rc = ipr_probe_ioa_part2(ioa_cfg);
9771
9772 if (rc) {
9773 __ipr_remove(pdev);
9774 return rc;
9775 }
9776
9777 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
9778
9779 if (rc) {
9780 __ipr_remove(pdev);
9781 return rc;
9782 }
9783
9784 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
9785 &ipr_trace_attr);
9786
9787 if (rc) {
9788 scsi_remove_host(ioa_cfg->host);
9789 __ipr_remove(pdev);
9790 return rc;
9791 }
9792
9793 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
9794 &ipr_dump_attr);
9795
9796 if (rc) {
9797 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9798 &ipr_trace_attr);
9799 scsi_remove_host(ioa_cfg->host);
9800 __ipr_remove(pdev);
9801 return rc;
9802 }
9803
9804 scsi_scan_host(ioa_cfg->host);
9805 ipr_scan_vsets(ioa_cfg);
9806 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
9807 ioa_cfg->allow_ml_add_del = 1;
9808 ioa_cfg->host->max_channel = IPR_VSET_BUS;
9809 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
9810
9811 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
9812 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
9813 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
9814 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
9815 ioa_cfg->iopoll_weight, ipr_iopoll);
9816 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
9817 }
9818 }
9819
9820 schedule_work(&ioa_cfg->work_q);
9821 return 0;
9822 }
9823
9824 /**
9825 * ipr_shutdown - Shutdown handler.
9826 * @pdev: pci device struct
9827 *
9828 * This function is invoked upon system shutdown/reboot. It will issue
9829 * an adapter shutdown to the adapter to flush the write cache.
9830 *
9831 * Return value:
9832 * none
9833 **/
9834 static void ipr_shutdown(struct pci_dev *pdev)
9835 {
9836 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9837 unsigned long lock_flags = 0;
9838 int i;
9839
9840 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9841 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
9842 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
9843 ioa_cfg->iopoll_weight = 0;
9844 for (i = 1; i < ioa_cfg->hrrq_num; i++)
9845 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
9846 }
9847
9848 while (ioa_cfg->in_reset_reload) {
9849 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9850 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9851 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9852 }
9853
9854 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9855 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9856 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9857 }
9858
9859 static const struct pci_device_id ipr_pci_table[] = {
9860 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9861 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
9862 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9863 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
9864 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9865 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
9866 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9867 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
9868 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9869 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
9870 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9871 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
9872 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9873 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
9874 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9875 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
9876 IPR_USE_LONG_TRANSOP_TIMEOUT },
9877 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9878 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
9879 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9880 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
9881 IPR_USE_LONG_TRANSOP_TIMEOUT },
9882 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9883 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
9884 IPR_USE_LONG_TRANSOP_TIMEOUT },
9885 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
9886 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
9887 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
9888 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
9889 IPR_USE_LONG_TRANSOP_TIMEOUT},
9890 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
9891 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
9892 IPR_USE_LONG_TRANSOP_TIMEOUT },
9893 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9894 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
9895 IPR_USE_LONG_TRANSOP_TIMEOUT },
9896 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9897 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
9898 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9899 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
9900 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9901 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
9902 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
9903 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
9904 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
9905 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
9906 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
9907 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
9908 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
9909 IPR_USE_LONG_TRANSOP_TIMEOUT },
9910 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
9911 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
9912 IPR_USE_LONG_TRANSOP_TIMEOUT },
9913 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9914 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
9915 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9916 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
9917 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9918 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
9919 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9920 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
9921 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9922 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
9923 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9924 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
9925 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9926 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
9927 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9928 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
9929 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9930 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
9931 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9932 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
9933 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9934 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
9935 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9936 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
9937 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9938 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
9939 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9940 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
9941 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9942 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
9943 { }
9944 };
9945 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
9946
9947 static const struct pci_error_handlers ipr_err_handler = {
9948 .error_detected = ipr_pci_error_detected,
9949 .slot_reset = ipr_pci_slot_reset,
9950 };
9951
9952 static struct pci_driver ipr_driver = {
9953 .name = IPR_NAME,
9954 .id_table = ipr_pci_table,
9955 .probe = ipr_probe,
9956 .remove = ipr_remove,
9957 .shutdown = ipr_shutdown,
9958 .err_handler = &ipr_err_handler,
9959 };
9960
9961 /**
9962 * ipr_halt_done - Shutdown prepare completion
9963 * @ipr_cmd: ipr command struct
 *
9964 * Return value:
9965 * none
9966 **/
9967 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
9968 {
9969 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9970 }
9971
9972 /**
9973 * ipr_halt - Issue shutdown prepare to all adapters
9974 * @nb: notifier block
 * @event: reboot notifier event
 * @buf: unused
 *
9975 * Return value:
9976 * NOTIFY_OK if shutdown prepare was issued / NOTIFY_DONE otherwise
9977 **/
9978 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
9979 {
9980 struct ipr_cmnd *ipr_cmd;
9981 struct ipr_ioa_cfg *ioa_cfg;
9982 unsigned long flags = 0;
9983
9984 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
9985 return NOTIFY_DONE;
9986
9987 spin_lock(&ipr_driver_lock);
9988
9989 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
9990 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9991 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
9992 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9993 continue;
9994 }
9995
9996 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9997 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9998 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9999 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10000 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10001
10002 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10003 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10004 }
10005 spin_unlock(&ipr_driver_lock);
10006
10007 return NOTIFY_OK;
10008 }
10009
10010 static struct notifier_block ipr_notifier = {
10011 	.notifier_call = ipr_halt,
10012 };
10013
10014 /**
10015 * ipr_init - Module entry point
10016 *
10017 * Return value:
10018 * 0 on success / negative value on failure
10019 **/
10020 static int __init ipr_init(void)
10021 {
10022 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10023 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10024
10025 register_reboot_notifier(&ipr_notifier);
10026 return pci_register_driver(&ipr_driver);
10027 }
10028
10029 /**
10030 * ipr_exit - Module unload
10031 *
10032 * Module unload entry point.
10033 *
10034 * Return value:
10035 * none
10036 **/
10037 static void __exit ipr_exit(void)
10038 {
10039 unregister_reboot_notifier(&ipr_notifier);
10040 pci_unregister_driver(&ipr_driver);
10041 }
10042
10043 module_init(ipr_init);
10044 module_exit(ipr_exit);