[SCSI] ipr: Add new CCIN definition for Grand Canyon support
drivers/scsi/ipr.c
1 /*
2 * ipr.c -- driver for IBM Power Linux RAID adapters
3 *
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5 *
6 * Copyright (C) 2003, 2004 IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24 /*
25 * Notes:
26 *
27 * This driver is used to control the following SCSI adapters:
28 *
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30 *
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
35 *
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
43 * - Hot spare
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
47 * by adding disks
48 *
49 * Driver Features:
50 * - Tagged command queuing
51 * - Adapter microcode download
52 * - PCI hot plug
53 * - SCSI device hot plug
54 *
55 */
56
57 #include <linux/fs.h>
58 #include <linux/init.h>
59 #include <linux/types.h>
60 #include <linux/errno.h>
61 #include <linux/kernel.h>
62 #include <linux/slab.h>
63 #include <linux/vmalloc.h>
64 #include <linux/ioport.h>
65 #include <linux/delay.h>
66 #include <linux/pci.h>
67 #include <linux/wait.h>
68 #include <linux/spinlock.h>
69 #include <linux/sched.h>
70 #include <linux/interrupt.h>
71 #include <linux/blkdev.h>
72 #include <linux/firmware.h>
73 #include <linux/module.h>
74 #include <linux/moduleparam.h>
75 #include <linux/libata.h>
76 #include <linux/hdreg.h>
77 #include <linux/reboot.h>
78 #include <linux/stringify.h>
79 #include <asm/io.h>
80 #include <asm/irq.h>
81 #include <asm/processor.h>
82 #include <scsi/scsi.h>
83 #include <scsi/scsi_host.h>
84 #include <scsi/scsi_tcq.h>
85 #include <scsi/scsi_eh.h>
86 #include <scsi/scsi_cmnd.h>
87 #include "ipr.h"
88
89 /*
90 * Global Data
91 */
92 static LIST_HEAD(ipr_ioa_head);
93 static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
94 static unsigned int ipr_max_speed = 1;
95 static int ipr_testmode = 0;
96 static unsigned int ipr_fastfail = 0;
97 static unsigned int ipr_transop_timeout = 0;
98 static unsigned int ipr_debug = 0;
99 static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
100 static unsigned int ipr_dual_ioa_raid = 1;
101 static unsigned int ipr_number_of_msix = 2;
102 static DEFINE_SPINLOCK(ipr_driver_lock);
103
104 /* This table describes the differences between DMA controller chips */
105 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
106 { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
107 .mailbox = 0x0042C,
108 .max_cmds = 100,
109 .cache_line_size = 0x20,
110 .clear_isr = 1,
111 .iopoll_weight = 0,
112 {
113 .set_interrupt_mask_reg = 0x0022C,
114 .clr_interrupt_mask_reg = 0x00230,
115 .clr_interrupt_mask_reg32 = 0x00230,
116 .sense_interrupt_mask_reg = 0x0022C,
117 .sense_interrupt_mask_reg32 = 0x0022C,
118 .clr_interrupt_reg = 0x00228,
119 .clr_interrupt_reg32 = 0x00228,
120 .sense_interrupt_reg = 0x00224,
121 .sense_interrupt_reg32 = 0x00224,
122 .ioarrin_reg = 0x00404,
123 .sense_uproc_interrupt_reg = 0x00214,
124 .sense_uproc_interrupt_reg32 = 0x00214,
125 .set_uproc_interrupt_reg = 0x00214,
126 .set_uproc_interrupt_reg32 = 0x00214,
127 .clr_uproc_interrupt_reg = 0x00218,
128 .clr_uproc_interrupt_reg32 = 0x00218
129 }
130 },
131 { /* Snipe and Scamp */
132 .mailbox = 0x0052C,
133 .max_cmds = 100,
134 .cache_line_size = 0x20,
135 .clear_isr = 1,
136 .iopoll_weight = 0,
137 {
138 .set_interrupt_mask_reg = 0x00288,
139 .clr_interrupt_mask_reg = 0x0028C,
140 .clr_interrupt_mask_reg32 = 0x0028C,
141 .sense_interrupt_mask_reg = 0x00288,
142 .sense_interrupt_mask_reg32 = 0x00288,
143 .clr_interrupt_reg = 0x00284,
144 .clr_interrupt_reg32 = 0x00284,
145 .sense_interrupt_reg = 0x00280,
146 .sense_interrupt_reg32 = 0x00280,
147 .ioarrin_reg = 0x00504,
148 .sense_uproc_interrupt_reg = 0x00290,
149 .sense_uproc_interrupt_reg32 = 0x00290,
150 .set_uproc_interrupt_reg = 0x00290,
151 .set_uproc_interrupt_reg32 = 0x00290,
152 .clr_uproc_interrupt_reg = 0x00294,
153 .clr_uproc_interrupt_reg32 = 0x00294
154 }
155 },
156 { /* CRoC */
157 .mailbox = 0x00044,
158 .max_cmds = 1000,
159 .cache_line_size = 0x20,
160 .clear_isr = 0,
161 .iopoll_weight = 64,
162 {
163 .set_interrupt_mask_reg = 0x00010,
164 .clr_interrupt_mask_reg = 0x00018,
165 .clr_interrupt_mask_reg32 = 0x0001C,
166 .sense_interrupt_mask_reg = 0x00010,
167 .sense_interrupt_mask_reg32 = 0x00014,
168 .clr_interrupt_reg = 0x00008,
169 .clr_interrupt_reg32 = 0x0000C,
170 .sense_interrupt_reg = 0x00000,
171 .sense_interrupt_reg32 = 0x00004,
172 .ioarrin_reg = 0x00070,
173 .sense_uproc_interrupt_reg = 0x00020,
174 .sense_uproc_interrupt_reg32 = 0x00024,
175 .set_uproc_interrupt_reg = 0x00020,
176 .set_uproc_interrupt_reg32 = 0x00024,
177 .clr_uproc_interrupt_reg = 0x00028,
178 .clr_uproc_interrupt_reg32 = 0x0002C,
179 .init_feedback_reg = 0x0005C,
180 .dump_addr_reg = 0x00064,
181 .dump_data_reg = 0x00068,
182 .endian_swap_reg = 0x00084
183 }
184 },
185 };
186
187 static const struct ipr_chip_t ipr_chip[] = {
188 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
189 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
190 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
191 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
192 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
193 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
194 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
195 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
196 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
197 };
198
199 static int ipr_max_bus_speeds[] = {
200 IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
201 };
202
203 MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
204 MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
205 module_param_named(max_speed, ipr_max_speed, uint, 0);
206 MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
207 module_param_named(log_level, ipr_log_level, uint, 0);
208 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
209 module_param_named(testmode, ipr_testmode, int, 0);
210 MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
211 module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
212 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
213 module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
214 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
215 module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
216 MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
217 module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
218 MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
219 module_param_named(max_devs, ipr_max_devs, int, 0);
220 MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
221 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
222 module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
223 MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:2)");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(IPR_DRIVER_VERSION);
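/*
 * Example (illustrative only): these parameters are normally supplied at
 * module load time, e.g.:
 *
 *	modprobe ipr max_speed=2 number_of_msix=4 log_level=2
 *
 * The parameters registered with S_IRUGO | S_IWUSR (fastfail, debug) can
 * also be changed at runtime via /sys/module/ipr/parameters/.
 */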
226
227 /* A constant array of IOASCs/URCs/Error Messages */
228 static const
229 struct ipr_error_table_t ipr_error_table[] = {
230 {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
231 "8155: An unknown error was received"},
232 {0x00330000, 0, 0,
233 "Soft underlength error"},
234 {0x005A0000, 0, 0,
235 "Command to be cancelled not found"},
236 {0x00808000, 0, 0,
237 "Qualified success"},
238 {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
239 "FFFE: Soft device bus error recovered by the IOA"},
240 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
241 "4101: Soft device bus fabric error"},
242 {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
243 "FFFC: Logical block guard error recovered by the device"},
244 {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
245 "FFFC: Logical block reference tag error recovered by the device"},
246 {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
247 "4171: Recovered scatter list tag / sequence number error"},
248 {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
249 "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
250 {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
251 "4171: Recovered logical block sequence number error on IOA to Host transfer"},
252 {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
253 "FFFD: Recovered logical block reference tag error detected by the IOA"},
254 {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
255 "FFFD: Logical block guard error recovered by the IOA"},
256 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
257 "FFF9: Device sector reassign successful"},
258 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
259 "FFF7: Media error recovered by device rewrite procedures"},
260 {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
261 "7001: IOA sector reassignment successful"},
262 {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
263 "FFF9: Soft media error. Sector reassignment recommended"},
264 {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
265 "FFF7: Media error recovered by IOA rewrite procedures"},
266 {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
267 "FF3D: Soft PCI bus error recovered by the IOA"},
268 {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
269 "FFF6: Device hardware error recovered by the IOA"},
270 {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
271 "FFF6: Device hardware error recovered by the device"},
272 {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
273 "FF3D: Soft IOA error recovered by the IOA"},
274 {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
275 "FFFA: Undefined device response recovered by the IOA"},
276 {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
277 "FFF6: Device bus error, message or command phase"},
278 {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
279 "FFFE: Task Management Function failed"},
280 {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
281 "FFF6: Failure prediction threshold exceeded"},
282 {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
283 "8009: Impending cache battery pack failure"},
284 {0x02040100, 0, 0,
285 "Logical Unit in process of becoming ready"},
286 {0x02040200, 0, 0,
287 "Initializing command required"},
288 {0x02040400, 0, 0,
289 "34FF: Disk device format in progress"},
290 {0x02040C00, 0, 0,
291 "Logical unit not accessible, target port in unavailable state"},
292 {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
293 "9070: IOA requested reset"},
294 {0x023F0000, 0, 0,
295 "Synchronization required"},
296 {0x02408500, 0, 0,
297 "IOA microcode download required"},
298 {0x02408600, 0, 0,
299 "Device bus connection is prohibited by host"},
300 {0x024E0000, 0, 0,
301 "No ready, IOA shutdown"},
302 {0x025A0000, 0, 0,
303 "Not ready, IOA has been shutdown"},
304 {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
305 "3020: Storage subsystem configuration error"},
306 {0x03110B00, 0, 0,
307 "FFF5: Medium error, data unreadable, recommend reassign"},
308 {0x03110C00, 0, 0,
309 "7000: Medium error, data unreadable, do not reassign"},
310 {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
311 "FFF3: Disk media format bad"},
312 {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
313 "3002: Addressed device failed to respond to selection"},
314 {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
315 "3100: Device bus error"},
316 {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
317 "3109: IOA timed out a device command"},
318 {0x04088000, 0, 0,
319 "3120: SCSI bus is not operational"},
320 {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
321 "4100: Hard device bus fabric error"},
322 {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
323 "310C: Logical block guard error detected by the device"},
324 {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
325 "310C: Logical block reference tag error detected by the device"},
326 {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
327 "4170: Scatter list tag / sequence number error"},
328 {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
329 "8150: Logical block CRC error on IOA to Host transfer"},
330 {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
331 "4170: Logical block sequence number error on IOA to Host transfer"},
332 {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
333 "310D: Logical block reference tag error detected by the IOA"},
334 {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
335 "310D: Logical block guard error detected by the IOA"},
336 {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
337 "9000: IOA reserved area data check"},
338 {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
339 "9001: IOA reserved area invalid data pattern"},
340 {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
341 "9002: IOA reserved area LRC error"},
342 {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
343 "Hardware Error, IOA metadata access error"},
344 {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
345 "102E: Out of alternate sectors for disk storage"},
346 {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
347 "FFF4: Data transfer underlength error"},
348 {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
349 "FFF4: Data transfer overlength error"},
350 {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
351 "3400: Logical unit failure"},
352 {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
353 "FFF4: Device microcode is corrupt"},
354 {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
355 "8150: PCI bus error"},
356 {0x04430000, 1, 0,
357 "Unsupported device bus message received"},
358 {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
359 "FFF4: Disk device problem"},
360 {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
361 "8150: Permanent IOA failure"},
362 {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
363 "3010: Disk device returned wrong response to IOA"},
364 {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
365 "8151: IOA microcode error"},
366 {0x04448500, 0, 0,
367 "Device bus status error"},
368 {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
369 "8157: IOA error requiring IOA reset to recover"},
370 {0x04448700, 0, 0,
371 "ATA device status error"},
372 {0x04490000, 0, 0,
373 "Message reject received from the device"},
374 {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
375 "8008: A permanent cache battery pack failure occurred"},
376 {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
377 "9090: Disk unit has been modified after the last known status"},
378 {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
379 "9081: IOA detected device error"},
380 {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
381 "9082: IOA detected device error"},
382 {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
383 "3110: Device bus error, message or command phase"},
384 {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
385 "3110: SAS Command / Task Management Function failed"},
386 {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
387 "9091: Incorrect hardware configuration change has been detected"},
388 {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
389 "9073: Invalid multi-adapter configuration"},
390 {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
391 "4010: Incorrect connection between cascaded expanders"},
392 {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
393 "4020: Connections exceed IOA design limits"},
394 {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
395 "4030: Incorrect multipath connection"},
396 {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
397 "4110: Unsupported enclosure function"},
398 {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
399 "4120: SAS cable VPD cannot be read"},
400 {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
401 "FFF4: Command to logical unit failed"},
402 {0x05240000, 1, 0,
403 "Illegal request, invalid request type or request packet"},
404 {0x05250000, 0, 0,
405 "Illegal request, invalid resource handle"},
406 {0x05258000, 0, 0,
407 "Illegal request, commands not allowed to this device"},
408 {0x05258100, 0, 0,
409 "Illegal request, command not allowed to a secondary adapter"},
410 {0x05258200, 0, 0,
411 "Illegal request, command not allowed to a non-optimized resource"},
412 {0x05260000, 0, 0,
413 "Illegal request, invalid field in parameter list"},
414 {0x05260100, 0, 0,
415 "Illegal request, parameter not supported"},
416 {0x05260200, 0, 0,
417 "Illegal request, parameter value invalid"},
418 {0x052C0000, 0, 0,
419 "Illegal request, command sequence error"},
420 {0x052C8000, 1, 0,
421 "Illegal request, dual adapter support not enabled"},
422 {0x052C8100, 1, 0,
423 "Illegal request, another cable connector was physically disabled"},
424 {0x054E8000, 1, 0,
425 "Illegal request, inconsistent group id/group count"},
426 {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
427 "9031: Array protection temporarily suspended, protection resuming"},
428 {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
429 "9040: Array protection temporarily suspended, protection resuming"},
430 {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
431 "4080: IOA exceeded maximum operating temperature"},
432 {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
433 "4085: Service required"},
434 {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
435 "3140: Device bus not ready to ready transition"},
436 {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
437 "FFFB: SCSI bus was reset"},
438 {0x06290500, 0, 0,
439 "FFFE: SCSI bus transition to single ended"},
440 {0x06290600, 0, 0,
441 "FFFE: SCSI bus transition to LVD"},
442 {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
443 "FFFB: SCSI bus was reset by another initiator"},
444 {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
445 "3029: A device replacement has occurred"},
446 {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
447 "4102: Device bus fabric performance degradation"},
448 {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
449 "9051: IOA cache data exists for a missing or failed device"},
450 {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
451 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
452 {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
453 "9025: Disk unit is not supported at its physical location"},
454 {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
455 "3020: IOA detected a SCSI bus configuration error"},
456 {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
457 "3150: SCSI bus configuration error"},
458 {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
459 "9074: Asymmetric advanced function disk configuration"},
460 {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
461 "4040: Incomplete multipath connection between IOA and enclosure"},
462 {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
463 "4041: Incomplete multipath connection between enclosure and device"},
464 {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
465 "9075: Incomplete multipath connection between IOA and remote IOA"},
466 {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
467 "9076: Configuration error, missing remote IOA"},
468 {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
469 "4050: Enclosure does not support a required multipath function"},
470 {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
471 "4121: Configuration error, required cable is missing"},
472 {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
473 "4122: Cable is not plugged into the correct location on remote IOA"},
474 {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
475 "4123: Configuration error, invalid cable vital product data"},
476 {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
477 "4124: Configuration error, both cable ends are plugged into the same IOA"},
478 {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
479 "4070: Logically bad block written on device"},
480 {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
481 "9041: Array protection temporarily suspended"},
482 {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
483 "9042: Corrupt array parity detected on specified device"},
484 {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
485 "9030: Array no longer protected due to missing or failed disk unit"},
486 {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
487 "9071: Link operational transition"},
488 {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
489 "9072: Link not operational transition"},
490 {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
491 "9032: Array exposed but still protected"},
492 {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
493 "70DD: Device forced failed by disrupt device command"},
494 {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
495 "4061: Multipath redundancy level got better"},
496 {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
497 "4060: Multipath redundancy level got worse"},
498 {0x07270000, 0, 0,
499 "Failure due to other device"},
500 {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
501 "9008: IOA does not support functions expected by devices"},
502 {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
503 "9010: Cache data associated with attached devices cannot be found"},
504 {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
505 "9011: Cache data belongs to devices other than those attached"},
506 {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
507 "9020: Array missing 2 or more devices with only 1 device present"},
508 {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
509 "9021: Array missing 2 or more devices with 2 or more devices present"},
510 {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
511 "9022: Exposed array is missing a required device"},
512 {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
513 "9023: Array member(s) not at required physical locations"},
514 {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
515 "9024: Array not functional due to present hardware configuration"},
516 {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
517 "9026: Array not functional due to present hardware configuration"},
518 {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
519 "9027: Array is missing a device and parity is out of sync"},
520 {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
521 "9028: Maximum number of arrays already exist"},
522 {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
523 "9050: Required cache data cannot be located for a disk unit"},
524 {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
525 "9052: Cache data exists for a device that has been modified"},
526 {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
527 "9054: IOA resources not available due to previous problems"},
528 {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
529 "9092: Disk unit requires initialization before use"},
530 {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
531 "9029: Incorrect hardware configuration change has been detected"},
532 {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
533 "9060: One or more disk pairs are missing from an array"},
534 {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
535 "9061: One or more disks are missing from an array"},
536 {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
537 "9062: One or more disks are missing from an array"},
538 {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
539 "9063: Maximum number of functional arrays has been exceeded"},
540 {0x07279A00, 0, 0,
541 "Data protect, other volume set problem"},
542 {0x0B260000, 0, 0,
543 "Aborted command, invalid descriptor"},
544 {0x0B3F9000, 0, 0,
545 "Target operating conditions have changed, dual adapter takeover"},
546 {0x0B530200, 0, 0,
547 "Aborted command, medium removal prevented"},
548 {0x0B5A0000, 0, 0,
549 "Command terminated by host"},
550 {0x0B5B8000, 0, 0,
551 "Aborted command, command terminated by host"}
552 };
553
554 static const struct ipr_ses_table_entry ipr_ses_table[] = {
555 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
556 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
557 { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
558 { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
559 { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
560 { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
561 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
562 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
563 { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
564 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
565 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
566 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
567 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
568 };
569
570 /*
571 * Function Prototypes
572 */
573 static int ipr_reset_alert(struct ipr_cmnd *);
574 static void ipr_process_ccn(struct ipr_cmnd *);
575 static void ipr_process_error(struct ipr_cmnd *);
576 static void ipr_reset_ioa_job(struct ipr_cmnd *);
577 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
578 enum ipr_shutdown_type);
579
580 #ifdef CONFIG_SCSI_IPR_TRACE
581 /**
582 * ipr_trc_hook - Add a trace entry to the driver trace
583 * @ipr_cmd: ipr command struct
584 * @type: trace type
585 * @add_data: additional data
586 *
587 * Return value:
588 * none
589 **/
590 static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
591 u8 type, u32 add_data)
592 {
593 struct ipr_trace_entry *trace_entry;
594 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
595
596 trace_entry = &ioa_cfg->trace[atomic_add_return
597 (1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
598 trace_entry->time = jiffies;
599 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
600 trace_entry->type = type;
601 if (ipr_cmd->ioa_cfg->sis64)
602 trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
603 else
604 trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
605 trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
606 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
607 trace_entry->u.add_data = add_data;
608 wmb();
609 }
610 #else
611 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
612 #endif
613
614 /**
615 * ipr_lock_and_done - Acquire lock and complete command
616 * @ipr_cmd: ipr command struct
617 *
618 * Return value:
619 * none
620 **/
621 static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
622 {
623 unsigned long lock_flags;
624 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
625
626 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
627 ipr_cmd->done(ipr_cmd);
628 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
629 }
630
631 /**
632 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
633 * @ipr_cmd: ipr command struct
634 *
635 * Return value:
636 * none
637 **/
638 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
639 {
640 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
641 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
642 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
643 dma_addr_t dma_addr = ipr_cmd->dma_addr;
644 int hrrq_id;
645
646 hrrq_id = ioarcb->cmd_pkt.hrrq_id;
647 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
648 ioarcb->cmd_pkt.hrrq_id = hrrq_id;
649 ioarcb->data_transfer_length = 0;
650 ioarcb->read_data_transfer_length = 0;
651 ioarcb->ioadl_len = 0;
652 ioarcb->read_ioadl_len = 0;
653
654 if (ipr_cmd->ioa_cfg->sis64) {
655 ioarcb->u.sis64_addr_data.data_ioadl_addr =
656 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
657 ioasa64->u.gata.status = 0;
658 } else {
659 ioarcb->write_ioadl_addr =
660 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
661 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
662 ioasa->u.gata.status = 0;
663 }
664
665 ioasa->hdr.ioasc = 0;
666 ioasa->hdr.residual_data_len = 0;
667 ipr_cmd->scsi_cmd = NULL;
668 ipr_cmd->qc = NULL;
669 ipr_cmd->sense_buffer[0] = 0;
670 ipr_cmd->dma_use_sg = 0;
671 }
672
673 /**
674 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
675 * @ipr_cmd: ipr command struct
 * @fast_done: fast done function callback
676 *
677 * Return value:
678 * none
679 **/
680 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
681 void (*fast_done) (struct ipr_cmnd *))
682 {
683 ipr_reinit_ipr_cmnd(ipr_cmd);
684 ipr_cmd->u.scratch = 0;
685 ipr_cmd->sibling = NULL;
686 ipr_cmd->fast_done = fast_done;
687 init_timer(&ipr_cmd->timer);
688 }
689
690 /**
691 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
692 * @hrrq: hrr queue struct
693 *
694 * Return value:
695 * pointer to ipr command struct
696 **/
697 static
698 struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
699 {
700 struct ipr_cmnd *ipr_cmd = NULL;
701
702 if (likely(!list_empty(&hrrq->hrrq_free_q))) {
703 ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
704 struct ipr_cmnd, queue);
705 list_del(&ipr_cmd->queue);
706 }
707
708
709 return ipr_cmd;
710 }
711
712 /**
713 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
714 * @ioa_cfg: ioa config struct
715 *
716 * Return value:
717 * pointer to ipr command struct
718 **/
719 static
720 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
721 {
722 struct ipr_cmnd *ipr_cmd =
723 __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
724 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
725 return ipr_cmd;
726 }
727
728 /**
729 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
730 * @ioa_cfg: ioa config struct
731 * @clr_ints: interrupts to clear
732 *
733 * This function masks all interrupts on the adapter, then clears the
734 * interrupts specified in the mask
735 *
736 * Return value:
737 * none
738 **/
739 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
740 u32 clr_ints)
741 {
742 volatile u32 int_reg;
743 int i;
744
745 /* Stop new interrupts */
746 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
747 spin_lock(&ioa_cfg->hrrq[i]._lock);
748 ioa_cfg->hrrq[i].allow_interrupts = 0;
749 spin_unlock(&ioa_cfg->hrrq[i]._lock);
750 }
751 wmb();
752
753 /* Set interrupt mask to stop all new interrupts */
754 if (ioa_cfg->sis64)
755 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
756 else
757 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
758
759 /* Clear any pending interrupts */
760 if (ioa_cfg->sis64)
761 writel(~0, ioa_cfg->regs.clr_interrupt_reg);
762 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
763 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
764 }
765
766 /**
767 * ipr_save_pcix_cmd_reg - Save PCI-X command register
768 * @ioa_cfg: ioa config struct
769 *
770 * Return value:
771 * 0 on success / -EIO on failure
772 **/
773 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
774 {
775 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
776
777 if (pcix_cmd_reg == 0)
778 return 0;
779
780 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
781 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
782 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
783 return -EIO;
784 }
785
786 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
787 return 0;
788 }
789
790 /**
791 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
792 * @ioa_cfg: ioa config struct
793 *
794 * Return value:
795 * 0 on success / -EIO on failure
796 **/
797 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
798 {
799 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
800
801 if (pcix_cmd_reg) {
802 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
803 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
804 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
805 return -EIO;
806 }
807 }
808
809 return 0;
810 }
811
812 /**
813 * ipr_sata_eh_done - done function for aborted SATA commands
814 * @ipr_cmd: ipr command struct
815 *
816 * This function is invoked for ops generated to SATA
817 * devices which are being aborted.
818 *
819 * Return value:
820 * none
821 **/
822 static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
823 {
824 struct ata_queued_cmd *qc = ipr_cmd->qc;
825 struct ipr_sata_port *sata_port = qc->ap->private_data;
826
827 qc->err_mask |= AC_ERR_OTHER;
828 sata_port->ioasa.status |= ATA_BUSY;
829 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
830 ata_qc_complete(qc);
831 }
832
833 /**
834 * ipr_scsi_eh_done - mid-layer done function for aborted ops
835 * @ipr_cmd: ipr command struct
836 *
837 * This function is invoked by the interrupt handler for
838 * ops generated by the SCSI mid-layer which are being aborted.
839 *
840 * Return value:
841 * none
842 **/
843 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
844 {
845 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
846
847 scsi_cmd->result |= (DID_ERROR << 16);
848
849 scsi_dma_unmap(ipr_cmd->scsi_cmd);
850 scsi_cmd->scsi_done(scsi_cmd);
851 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
852 }
853
854 /**
855 * ipr_fail_all_ops - Fails all outstanding ops.
856 * @ioa_cfg: ioa config struct
857 *
858 * This function fails all outstanding ops.
859 *
860 * Return value:
861 * none
862 **/
863 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
864 {
865 struct ipr_cmnd *ipr_cmd, *temp;
866 struct ipr_hrr_queue *hrrq;
867
868 ENTER;
869 for_each_hrrq(hrrq, ioa_cfg) {
870 spin_lock(&hrrq->_lock);
871 list_for_each_entry_safe(ipr_cmd,
872 temp, &hrrq->hrrq_pending_q, queue) {
873 list_del(&ipr_cmd->queue);
874
875 ipr_cmd->s.ioasa.hdr.ioasc =
876 cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
877 ipr_cmd->s.ioasa.hdr.ilid =
878 cpu_to_be32(IPR_DRIVER_ILID);
879
880 if (ipr_cmd->scsi_cmd)
881 ipr_cmd->done = ipr_scsi_eh_done;
882 else if (ipr_cmd->qc)
883 ipr_cmd->done = ipr_sata_eh_done;
884
885 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
886 IPR_IOASC_IOA_WAS_RESET);
887 del_timer(&ipr_cmd->timer);
888 ipr_cmd->done(ipr_cmd);
889 }
890 spin_unlock(&hrrq->_lock);
891 }
892 LEAVE;
893 }
894
895 /**
896 * ipr_send_command - Send driver initiated requests.
897 * @ipr_cmd: ipr command struct
898 *
899 * This function sends a command to the adapter using the correct write call.
900 * In the case of sis64, calculate the ioarcb size required. Then or in the
901 * appropriate bits.
902 *
903 * Return value:
904 * none
905 **/
906 static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
907 {
908 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
909 dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
910
911 if (ioa_cfg->sis64) {
912 /* The default size is 256 bytes */
913 send_dma_addr |= 0x1;
914
915 /* If the number of ioadls * size of ioadl > 128 bytes,
916 then use a 512 byte ioarcb */
917 if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
918 send_dma_addr |= 0x4;
919 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
920 } else
921 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
922 }
923
924 /**
925 * ipr_do_req - Send driver initiated requests.
926 * @ipr_cmd: ipr command struct
927 * @done: done function
928 * @timeout_func: timeout function
929 * @timeout: timeout value
930 *
931 * This function sends the specified command to the adapter with the
932 * timeout given. The done function is invoked on command completion.
933 *
934 * Return value:
935 * none
936 **/
937 static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
938 void (*done) (struct ipr_cmnd *),
939 void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
940 {
941 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
942
943 ipr_cmd->done = done;
944
945 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
946 ipr_cmd->timer.expires = jiffies + timeout;
947 ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
948
949 add_timer(&ipr_cmd->timer);
950
951 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
952
953 ipr_send_command(ipr_cmd);
954 }
955
956 /**
957 * ipr_internal_cmd_done - Op done function for an internally generated op.
958 * @ipr_cmd: ipr command struct
959 *
960 * This function is the op done function for an internally generated,
961 * blocking op. It simply wakes the sleeping thread.
962 *
963 * Return value:
964 * none
965 **/
966 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
967 {
968 if (ipr_cmd->sibling)
969 ipr_cmd->sibling = NULL;
970 else
971 complete(&ipr_cmd->completion);
972 }
973
974 /**
975 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
976 * @ipr_cmd: ipr command struct
977 * @dma_addr: dma address
978 * @len: transfer length
979 * @flags: ioadl flag value
980 *
981 * This function initializes an ioadl in the case where there is only a single
982 * descriptor.
983 *
984 * Return value:
985 * nothing
986 **/
987 static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
988 u32 len, int flags)
989 {
990 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
991 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
992
993 ipr_cmd->dma_use_sg = 1;
994
995 if (ipr_cmd->ioa_cfg->sis64) {
996 ioadl64->flags = cpu_to_be32(flags);
997 ioadl64->data_len = cpu_to_be32(len);
998 ioadl64->address = cpu_to_be64(dma_addr);
999
1000 ipr_cmd->ioarcb.ioadl_len =
1001 cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
1002 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1003 } else {
1004 ioadl->flags_and_data_len = cpu_to_be32(flags | len);
1005 ioadl->address = cpu_to_be32(dma_addr);
1006
1007 if (flags == IPR_IOADL_FLAGS_READ_LAST) {
1008 ipr_cmd->ioarcb.read_ioadl_len =
1009 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1010 ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
1011 } else {
1012 ipr_cmd->ioarcb.ioadl_len =
1013 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1014 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1015 }
1016 }
1017 }
1018
1019 /**
1020 * ipr_send_blocking_cmd - Send command and sleep on its completion.
1021 * @ipr_cmd: ipr command struct
1022 * @timeout_func: function to invoke if command times out
1023 * @timeout: timeout
1024 *
1025 * Return value:
1026 * none
1027 **/
1028 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
1029 void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
1030 u32 timeout)
1031 {
1032 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1033
1034 init_completion(&ipr_cmd->completion);
1035 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
1036
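	/* The caller holds host_lock; release it while sleeping on completion */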
1037 spin_unlock_irq(ioa_cfg->host->host_lock);
1038 wait_for_completion(&ipr_cmd->completion);
1039 spin_lock_irq(ioa_cfg->host->host_lock);
1040 }
1041
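/**
 * ipr_get_hrrq_index - Select an HRRQ index for a new command
 * @ioa_cfg: ioa config struct
 *
 * HRRQ 0 is reserved for initialization and internal commands, so when
 * more than one queue is configured the remaining queues are selected
 * round-robin (for example, with hrrq_num = 4 the index cycles 1, 2, 3).
 *
 * Return value:
 * 	selected HRRQ index
 **/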
1042 static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1043 {
1044 if (ioa_cfg->hrrq_num == 1)
1045 return 0;
1046 else
1047 return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
1048 }
1049
1050 /**
1051 * ipr_send_hcam - Send an HCAM to the adapter.
1052 * @ioa_cfg: ioa config struct
1053 * @type: HCAM type
1054 * @hostrcb: hostrcb struct
1055 *
1056 * This function will send a Host Controlled Async command to the adapter.
1057 * If HCAMs are currently not allowed to be issued to the adapter, it will
1058 * place the hostrcb on the free queue.
1059 *
1060 * Return value:
1061 * none
1062 **/
1063 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1064 struct ipr_hostrcb *hostrcb)
1065 {
1066 struct ipr_cmnd *ipr_cmd;
1067 struct ipr_ioarcb *ioarcb;
1068
1069 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1070 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
1071 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1072 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1073
1074 ipr_cmd->u.hostrcb = hostrcb;
1075 ioarcb = &ipr_cmd->ioarcb;
1076
1077 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1078 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
1079 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
1080 ioarcb->cmd_pkt.cdb[1] = type;
1081 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
1082 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
1083
1084 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
1085 sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
1086
1087 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
1088 ipr_cmd->done = ipr_process_ccn;
1089 else
1090 ipr_cmd->done = ipr_process_error;
1091
1092 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
1093
1094 ipr_send_command(ipr_cmd);
1095 } else {
1096 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1097 }
1098 }
1099
1100 /**
1101 * ipr_update_ata_class - Update the ata class in the resource entry
1102 * @res: resource entry struct
1103 * @proto: cfgte device bus protocol value
1104 *
1105 * Return value:
1106 * none
1107 **/
1108 static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
1109 {
1110 switch (proto) {
1111 case IPR_PROTO_SATA:
1112 case IPR_PROTO_SAS_STP:
1113 res->ata_class = ATA_DEV_ATA;
1114 break;
1115 case IPR_PROTO_SATA_ATAPI:
1116 case IPR_PROTO_SAS_STP_ATAPI:
1117 res->ata_class = ATA_DEV_ATAPI;
1118 break;
1119 default:
1120 res->ata_class = ATA_DEV_UNKNOWN;
1121 break;
1122 }
1123 }
1124
1125 /**
1126 * ipr_init_res_entry - Initialize a resource entry struct.
1127 * @res: resource entry struct
1128 * @cfgtew: config table entry wrapper struct
1129 *
1130 * Return value:
1131 * none
1132 **/
1133 static void ipr_init_res_entry(struct ipr_resource_entry *res,
1134 struct ipr_config_table_entry_wrapper *cfgtew)
1135 {
1136 int found = 0;
1137 unsigned int proto;
1138 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1139 struct ipr_resource_entry *gscsi_res = NULL;
1140
1141 res->needs_sync_complete = 0;
1142 res->in_erp = 0;
1143 res->add_to_ml = 0;
1144 res->del_from_ml = 0;
1145 res->resetting_device = 0;
1146 res->reset_occurred = 0;
1147 res->sdev = NULL;
1148 res->sata_port = NULL;
1149
1150 if (ioa_cfg->sis64) {
1151 proto = cfgtew->u.cfgte64->proto;
1152 res->res_flags = cfgtew->u.cfgte64->res_flags;
1153 res->qmodel = IPR_QUEUEING_MODEL64(res);
1154 res->type = cfgtew->u.cfgte64->res_type;
1155
1156 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1157 sizeof(res->res_path));
1158
1159 res->bus = 0;
1160 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1161 sizeof(res->dev_lun.scsi_lun));
1162 res->lun = scsilun_to_int(&res->dev_lun);
1163
1164 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1165 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1166 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1167 found = 1;
1168 res->target = gscsi_res->target;
1169 break;
1170 }
1171 }
1172 if (!found) {
1173 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1174 ioa_cfg->max_devs_supported);
1175 set_bit(res->target, ioa_cfg->target_ids);
1176 }
1177 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1178 res->bus = IPR_IOAFP_VIRTUAL_BUS;
1179 res->target = 0;
1180 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1181 res->bus = IPR_ARRAY_VIRTUAL_BUS;
1182 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1183 ioa_cfg->max_devs_supported);
1184 set_bit(res->target, ioa_cfg->array_ids);
1185 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1186 res->bus = IPR_VSET_VIRTUAL_BUS;
1187 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1188 ioa_cfg->max_devs_supported);
1189 set_bit(res->target, ioa_cfg->vset_ids);
1190 } else {
1191 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1192 ioa_cfg->max_devs_supported);
1193 set_bit(res->target, ioa_cfg->target_ids);
1194 }
1195 } else {
1196 proto = cfgtew->u.cfgte->proto;
1197 res->qmodel = IPR_QUEUEING_MODEL(res);
1198 res->flags = cfgtew->u.cfgte->flags;
1199 if (res->flags & IPR_IS_IOA_RESOURCE)
1200 res->type = IPR_RES_TYPE_IOAFP;
1201 else
1202 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1203
1204 res->bus = cfgtew->u.cfgte->res_addr.bus;
1205 res->target = cfgtew->u.cfgte->res_addr.target;
1206 res->lun = cfgtew->u.cfgte->res_addr.lun;
1207 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
1208 }
1209
1210 ipr_update_ata_class(res, proto);
1211 }
1212
1213 /**
1214 * ipr_is_same_device - Determine if two devices are the same.
1215 * @res: resource entry struct
1216 * @cfgtew: config table entry wrapper struct
1217 *
1218 * Return value:
1219 * 1 if the devices are the same / 0 otherwise
1220 **/
1221 static int ipr_is_same_device(struct ipr_resource_entry *res,
1222 struct ipr_config_table_entry_wrapper *cfgtew)
1223 {
1224 if (res->ioa_cfg->sis64) {
1225 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1226 sizeof(cfgtew->u.cfgte64->dev_id)) &&
1227 !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1228 sizeof(cfgtew->u.cfgte64->lun))) {
1229 return 1;
1230 }
1231 } else {
1232 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1233 res->target == cfgtew->u.cfgte->res_addr.target &&
1234 res->lun == cfgtew->u.cfgte->res_addr.lun)
1235 return 1;
1236 }
1237
1238 return 0;
1239 }
1240
1241 /**
1242 * __ipr_format_res_path - Format the resource path for printing.
1243 * @res_path: resource path
1244 * @buf: buffer
1245 * @len: length of buffer provided
1246 *
1247 * Return value:
1248 * pointer to buffer
1249 **/
1250 static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
1251 {
1252 int i;
1253 char *p = buffer;
1254
1255 *p = '\0';
1256 p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1257 for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1258 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
1259
1260 return buffer;
1261 }
1262
1263 /**
1264 * ipr_format_res_path - Format the resource path for printing.
1265 * @ioa_cfg: ioa config struct
1266 * @res_path: resource path
1267 * @buf: buffer
1268 * @len: length of buffer provided
1269 *
1270 * Return value:
1271 * pointer to buffer
1272 **/
1273 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1274 u8 *res_path, char *buffer, int len)
1275 {
1276 char *p = buffer;
1277
1278 *p = '\0';
1279 p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1280 __ipr_format_res_path(res_path, p, len - (buffer - p));
1281 return buffer;
1282 }
1283
1284 /**
1285 * ipr_update_res_entry - Update the resource entry.
1286 * @res: resource entry struct
1287 * @cfgtew: config table entry wrapper struct
1288 *
1289 * Return value:
1290 * none
1291 **/
1292 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1293 struct ipr_config_table_entry_wrapper *cfgtew)
1294 {
1295 char buffer[IPR_MAX_RES_PATH_LENGTH];
1296 unsigned int proto;
1297 int new_path = 0;
1298
1299 if (res->ioa_cfg->sis64) {
1300 res->flags = cfgtew->u.cfgte64->flags;
1301 res->res_flags = cfgtew->u.cfgte64->res_flags;
1302 res->type = cfgtew->u.cfgte64->res_type;
1303
1304 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1305 sizeof(struct ipr_std_inq_data));
1306
1307 res->qmodel = IPR_QUEUEING_MODEL64(res);
1308 proto = cfgtew->u.cfgte64->proto;
1309 res->res_handle = cfgtew->u.cfgte64->res_handle;
1310 res->dev_id = cfgtew->u.cfgte64->dev_id;
1311
1312 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1313 sizeof(res->dev_lun.scsi_lun));
1314
1315 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1316 sizeof(res->res_path))) {
1317 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1318 sizeof(res->res_path));
1319 new_path = 1;
1320 }
1321
1322 if (res->sdev && new_path)
1323 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1324 ipr_format_res_path(res->ioa_cfg,
1325 res->res_path, buffer, sizeof(buffer)));
1326 } else {
1327 res->flags = cfgtew->u.cfgte->flags;
1328 if (res->flags & IPR_IS_IOA_RESOURCE)
1329 res->type = IPR_RES_TYPE_IOAFP;
1330 else
1331 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1332
1333 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1334 sizeof(struct ipr_std_inq_data));
1335
1336 res->qmodel = IPR_QUEUEING_MODEL(res);
1337 proto = cfgtew->u.cfgte->proto;
1338 res->res_handle = cfgtew->u.cfgte->res_handle;
1339 }
1340
1341 ipr_update_ata_class(res, proto);
1342 }
1343
1344 /**
1345 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1346 * for the resource.
1347 * @res: resource entry struct
1349 *
1350 * Return value:
1351 * none
1352 **/
1353 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1354 {
1355 struct ipr_resource_entry *gscsi_res = NULL;
1356 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1357
1358 if (!ioa_cfg->sis64)
1359 return;
1360
1361 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1362 clear_bit(res->target, ioa_cfg->array_ids);
1363 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1364 clear_bit(res->target, ioa_cfg->vset_ids);
1365 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1366 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1367 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1368 return;
1369 clear_bit(res->target, ioa_cfg->target_ids);
1370
1371 } else if (res->bus == 0)
1372 clear_bit(res->target, ioa_cfg->target_ids);
1373 }
1374
1375 /**
1376 * ipr_handle_config_change - Handle a config change from the adapter
1377 * @ioa_cfg: ioa config struct
1378 * @hostrcb: hostrcb
1379 *
1380 * Return value:
1381 * none
1382 **/
1383 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1384 struct ipr_hostrcb *hostrcb)
1385 {
1386 struct ipr_resource_entry *res = NULL;
1387 struct ipr_config_table_entry_wrapper cfgtew;
1388 __be32 cc_res_handle;
1389
1390 u32 is_ndn = 1;
1391
1392 if (ioa_cfg->sis64) {
1393 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1394 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1395 } else {
1396 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1397 cc_res_handle = cfgtew.u.cfgte->res_handle;
1398 }
1399
1400 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1401 if (res->res_handle == cc_res_handle) {
1402 is_ndn = 0;
1403 break;
1404 }
1405 }
1406
1407 if (is_ndn) {
1408 if (list_empty(&ioa_cfg->free_res_q)) {
1409 ipr_send_hcam(ioa_cfg,
1410 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1411 hostrcb);
1412 return;
1413 }
1414
1415 res = list_entry(ioa_cfg->free_res_q.next,
1416 struct ipr_resource_entry, queue);
1417
1418 list_del(&res->queue);
1419 ipr_init_res_entry(res, &cfgtew);
1420 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1421 }
1422
1423 ipr_update_res_entry(res, &cfgtew);
1424
1425 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1426 if (res->sdev) {
1427 res->del_from_ml = 1;
1428 res->res_handle = IPR_INVALID_RES_HANDLE;
1429 if (ioa_cfg->allow_ml_add_del)
1430 schedule_work(&ioa_cfg->work_q);
1431 } else {
1432 ipr_clear_res_target(res);
1433 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1434 }
1435 } else if (!res->sdev || res->del_from_ml) {
1436 res->add_to_ml = 1;
1437 if (ioa_cfg->allow_ml_add_del)
1438 schedule_work(&ioa_cfg->work_q);
1439 }
1440
1441 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1442 }
1443
1444 /**
1445 * ipr_process_ccn - Op done function for a CCN.
1446 * @ipr_cmd: ipr command struct
1447 *
1448 * This function is the op done function for a configuration
1449 * change notification host controlled async from the adapter.
1450 *
1451 * Return value:
1452 * none
1453 **/
1454 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1455 {
1456 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1457 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1458 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1459
1460 list_del(&hostrcb->queue);
1461 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1462
1463 if (ioasc) {
1464 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1465 dev_err(&ioa_cfg->pdev->dev,
1466 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1467
1468 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1469 } else {
1470 ipr_handle_config_change(ioa_cfg, hostrcb);
1471 }
1472 }
1473
1474 /**
1475 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1476 * @i: index into buffer
1477 * @buf: string to modify
1478 *
1479 * This function will strip all trailing whitespace, pad the end
1480 * of the string with a single space, and NULL terminate the string.
1481 *
1482 * Return value:
1483 * new length of string
1484 **/
1485 static int strip_and_pad_whitespace(int i, char *buf)
1486 {
1487 while (i && buf[i] == ' ')
1488 i--;
1489 buf[i+1] = ' ';
1490 buf[i+2] = '\0';
1491 return i + 2;
1492 }
1493
1494 /**
1495 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1496 * @prefix: string to print at start of printk
1497 * @hostrcb: hostrcb pointer
1498 * @vpd: vendor/product id/sn struct
1499 *
1500 * Return value:
1501 * none
1502 **/
1503 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1504 struct ipr_vpd *vpd)
1505 {
1506 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1507 int i = 0;
1508
1509 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1510 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1511
1512 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1513 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1514
1515 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1516 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1517
1518 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1519 }
1520
1521 /**
1522 * ipr_log_vpd - Log the passed VPD to the error log.
1523 * @vpd: vendor/product id/sn struct
1524 *
1525 * Return value:
1526 * none
1527 **/
1528 static void ipr_log_vpd(struct ipr_vpd *vpd)
1529 {
1530 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1531 + IPR_SERIAL_NUM_LEN];
1532
1533 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1534 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1535 IPR_PROD_ID_LEN);
1536 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1537 ipr_err("Vendor/Product ID: %s\n", buffer);
1538
1539 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1540 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1541 ipr_err(" Serial Number: %s\n", buffer);
1542 }
1543
1544 /**
1545 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1546 * @prefix: string to print at start of printk
1547 * @hostrcb: hostrcb pointer
1548 * @vpd: vendor/product id/sn/wwn struct
1549 *
1550 * Return value:
1551 * none
1552 **/
1553 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1554 struct ipr_ext_vpd *vpd)
1555 {
1556 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1557 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1558 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1559 }
1560
1561 /**
1562 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1563 * @vpd: vendor/product id/sn/wwn struct
1564 *
1565 * Return value:
1566 * none
1567 **/
1568 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1569 {
1570 ipr_log_vpd(&vpd->vpd);
1571 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1572 be32_to_cpu(vpd->wwid[1]));
1573 }
1574
1575 /**
1576 * ipr_log_enhanced_cache_error - Log a cache error.
1577 * @ioa_cfg: ioa config struct
1578 * @hostrcb: hostrcb struct
1579 *
1580 * Return value:
1581 * none
1582 **/
1583 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1584 struct ipr_hostrcb *hostrcb)
1585 {
1586 struct ipr_hostrcb_type_12_error *error;
1587
1588 if (ioa_cfg->sis64)
1589 error = &hostrcb->hcam.u.error64.u.type_12_error;
1590 else
1591 error = &hostrcb->hcam.u.error.u.type_12_error;
1592
1593 ipr_err("-----Current Configuration-----\n");
1594 ipr_err("Cache Directory Card Information:\n");
1595 ipr_log_ext_vpd(&error->ioa_vpd);
1596 ipr_err("Adapter Card Information:\n");
1597 ipr_log_ext_vpd(&error->cfc_vpd);
1598
1599 ipr_err("-----Expected Configuration-----\n");
1600 ipr_err("Cache Directory Card Information:\n");
1601 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1602 ipr_err("Adapter Card Information:\n");
1603 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1604
1605 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1606 be32_to_cpu(error->ioa_data[0]),
1607 be32_to_cpu(error->ioa_data[1]),
1608 be32_to_cpu(error->ioa_data[2]));
1609 }
1610
1611 /**
1612 * ipr_log_cache_error - Log a cache error.
1613 * @ioa_cfg: ioa config struct
1614 * @hostrcb: hostrcb struct
1615 *
1616 * Return value:
1617 * none
1618 **/
1619 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1620 struct ipr_hostrcb *hostrcb)
1621 {
1622 struct ipr_hostrcb_type_02_error *error =
1623 &hostrcb->hcam.u.error.u.type_02_error;
1624
1625 ipr_err("-----Current Configuration-----\n");
1626 ipr_err("Cache Directory Card Information:\n");
1627 ipr_log_vpd(&error->ioa_vpd);
1628 ipr_err("Adapter Card Information:\n");
1629 ipr_log_vpd(&error->cfc_vpd);
1630
1631 ipr_err("-----Expected Configuration-----\n");
1632 ipr_err("Cache Directory Card Information:\n");
1633 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1634 ipr_err("Adapter Card Information:\n");
1635 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1636
1637 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1638 be32_to_cpu(error->ioa_data[0]),
1639 be32_to_cpu(error->ioa_data[1]),
1640 be32_to_cpu(error->ioa_data[2]));
1641 }
1642
1643 /**
1644 * ipr_log_enhanced_config_error - Log a configuration error.
1645 * @ioa_cfg: ioa config struct
1646 * @hostrcb: hostrcb struct
1647 *
1648 * Return value:
1649 * none
1650 **/
1651 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1652 struct ipr_hostrcb *hostrcb)
1653 {
1654 int errors_logged, i;
1655 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1656 struct ipr_hostrcb_type_13_error *error;
1657
1658 error = &hostrcb->hcam.u.error.u.type_13_error;
1659 errors_logged = be32_to_cpu(error->errors_logged);
1660
1661 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1662 be32_to_cpu(error->errors_detected), errors_logged);
1663
1664 dev_entry = error->dev;
1665
1666 for (i = 0; i < errors_logged; i++, dev_entry++) {
1667 ipr_err_separator;
1668
1669 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1670 ipr_log_ext_vpd(&dev_entry->vpd);
1671
1672 ipr_err("-----New Device Information-----\n");
1673 ipr_log_ext_vpd(&dev_entry->new_vpd);
1674
1675 ipr_err("Cache Directory Card Information:\n");
1676 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1677
1678 ipr_err("Adapter Card Information:\n");
1679 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1680 }
1681 }
1682
1683 /**
1684 * ipr_log_sis64_config_error - Log a sis64 configuration error.
1685 * @ioa_cfg: ioa config struct
1686 * @hostrcb: hostrcb struct
1687 *
1688 * Return value:
1689 * none
1690 **/
1691 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1692 struct ipr_hostrcb *hostrcb)
1693 {
1694 int errors_logged, i;
1695 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1696 struct ipr_hostrcb_type_23_error *error;
1697 char buffer[IPR_MAX_RES_PATH_LENGTH];
1698
1699 error = &hostrcb->hcam.u.error64.u.type_23_error;
1700 errors_logged = be32_to_cpu(error->errors_logged);
1701
1702 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1703 be32_to_cpu(error->errors_detected), errors_logged);
1704
1705 dev_entry = error->dev;
1706
1707 for (i = 0; i < errors_logged; i++, dev_entry++) {
1708 ipr_err_separator;
1709
1710 ipr_err("Device %d : %s", i + 1,
1711 __ipr_format_res_path(dev_entry->res_path,
1712 buffer, sizeof(buffer)));
1713 ipr_log_ext_vpd(&dev_entry->vpd);
1714
1715 ipr_err("-----New Device Information-----\n");
1716 ipr_log_ext_vpd(&dev_entry->new_vpd);
1717
1718 ipr_err("Cache Directory Card Information:\n");
1719 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1720
1721 ipr_err("Adapter Card Information:\n");
1722 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1723 }
1724 }
1725
1726 /**
1727 * ipr_log_config_error - Log a configuration error.
1728 * @ioa_cfg: ioa config struct
1729 * @hostrcb: hostrcb struct
1730 *
1731 * Return value:
1732 * none
1733 **/
1734 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1735 struct ipr_hostrcb *hostrcb)
1736 {
1737 int errors_logged, i;
1738 struct ipr_hostrcb_device_data_entry *dev_entry;
1739 struct ipr_hostrcb_type_03_error *error;
1740
1741 error = &hostrcb->hcam.u.error.u.type_03_error;
1742 errors_logged = be32_to_cpu(error->errors_logged);
1743
1744 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1745 be32_to_cpu(error->errors_detected), errors_logged);
1746
1747 dev_entry = error->dev;
1748
1749 for (i = 0; i < errors_logged; i++, dev_entry++) {
1750 ipr_err_separator;
1751
1752 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1753 ipr_log_vpd(&dev_entry->vpd);
1754
1755 ipr_err("-----New Device Information-----\n");
1756 ipr_log_vpd(&dev_entry->new_vpd);
1757
1758 ipr_err("Cache Directory Card Information:\n");
1759 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1760
1761 ipr_err("Adapter Card Information:\n");
1762 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1763
1764 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1765 be32_to_cpu(dev_entry->ioa_data[0]),
1766 be32_to_cpu(dev_entry->ioa_data[1]),
1767 be32_to_cpu(dev_entry->ioa_data[2]),
1768 be32_to_cpu(dev_entry->ioa_data[3]),
1769 be32_to_cpu(dev_entry->ioa_data[4]));
1770 }
1771 }
1772
1773 /**
1774 * ipr_log_enhanced_array_error - Log an array configuration error.
1775 * @ioa_cfg: ioa config struct
1776 * @hostrcb: hostrcb struct
1777 *
1778 * Return value:
1779 * none
1780 **/
1781 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1782 struct ipr_hostrcb *hostrcb)
1783 {
1784 int i, num_entries;
1785 struct ipr_hostrcb_type_14_error *error;
1786 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1787 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1788
1789 error = &hostrcb->hcam.u.error.u.type_14_error;
1790
1791 ipr_err_separator;
1792
1793 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1794 error->protection_level,
1795 ioa_cfg->host->host_no,
1796 error->last_func_vset_res_addr.bus,
1797 error->last_func_vset_res_addr.target,
1798 error->last_func_vset_res_addr.lun);
1799
1800 ipr_err_separator;
1801
1802 array_entry = error->array_member;
1803 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1804 ARRAY_SIZE(error->array_member));
1805
1806 for (i = 0; i < num_entries; i++, array_entry++) {
1807 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1808 continue;
1809
1810 if (be32_to_cpu(error->exposed_mode_adn) == i)
1811 ipr_err("Exposed Array Member %d:\n", i);
1812 else
1813 ipr_err("Array Member %d:\n", i);
1814
1815 ipr_log_ext_vpd(&array_entry->vpd);
1816 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1817 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1818 "Expected Location");
1819
1820 ipr_err_separator;
1821 }
1822 }
1823
1824 /**
1825 * ipr_log_array_error - Log an array configuration error.
1826 * @ioa_cfg: ioa config struct
1827 * @hostrcb: hostrcb struct
1828 *
1829 * Return value:
1830 * none
1831 **/
1832 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1833 struct ipr_hostrcb *hostrcb)
1834 {
1835 int i;
1836 struct ipr_hostrcb_type_04_error *error;
1837 struct ipr_hostrcb_array_data_entry *array_entry;
1838 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1839
1840 error = &hostrcb->hcam.u.error.u.type_04_error;
1841
1842 ipr_err_separator;
1843
1844 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1845 error->protection_level,
1846 ioa_cfg->host->host_no,
1847 error->last_func_vset_res_addr.bus,
1848 error->last_func_vset_res_addr.target,
1849 error->last_func_vset_res_addr.lun);
1850
1851 ipr_err_separator;
1852
1853 array_entry = error->array_member;
1854
1855 for (i = 0; i < 18; i++) {
1856 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1857 continue;
1858
1859 if (be32_to_cpu(error->exposed_mode_adn) == i)
1860 ipr_err("Exposed Array Member %d:\n", i);
1861 else
1862 ipr_err("Array Member %d:\n", i);
1863
1864 ipr_log_vpd(&array_entry->vpd);
1865
1866 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1867 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1868 "Expected Location");
1869
1870 ipr_err_separator;
1871
1872 if (i == 9)
1873 array_entry = error->array_member2;
1874 else
1875 array_entry++;
1876 }
1877 }
1878
1879 /**
1880 * ipr_log_hex_data - Log additional hex IOA error data.
1881 * @ioa_cfg: ioa config struct
1882 * @data: IOA error data
1883 * @len: data length in bytes
1884 *
1885 * Return value:
1886 * none
1887 **/
1888 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1889 {
1890 int i;
1891
1892 if (len == 0)
1893 return;
1894
1895 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1896 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1897
1898 for (i = 0; i < len / 4; i += 4) {
1899 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1900 be32_to_cpu(data[i]),
1901 be32_to_cpu(data[i+1]),
1902 be32_to_cpu(data[i+2]),
1903 be32_to_cpu(data[i+3]));
1904 }
1905 }
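/*
 * Illustrative example (not driver code): for len = 32 the loop above
 * prints two lines, each prefixed with the byte offset of its first word:
 *
 *   00000000: AABBCCDD 00000001 00000002 00000003
 *   00000010: 00000004 00000005 00000006 00000007
 */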
1906
1907 /**
1908 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1909 * @ioa_cfg: ioa config struct
1910 * @hostrcb: hostrcb struct
1911 *
1912 * Return value:
1913 * none
1914 **/
1915 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1916 struct ipr_hostrcb *hostrcb)
1917 {
1918 struct ipr_hostrcb_type_17_error *error;
1919
1920 if (ioa_cfg->sis64)
1921 error = &hostrcb->hcam.u.error64.u.type_17_error;
1922 else
1923 error = &hostrcb->hcam.u.error.u.type_17_error;
1924
1925 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1926 strim(error->failure_reason);
1927
1928 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1929 be32_to_cpu(hostrcb->hcam.u.error.prc));
1930 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1931 ipr_log_hex_data(ioa_cfg, error->data,
1932 be32_to_cpu(hostrcb->hcam.length) -
1933 (offsetof(struct ipr_hostrcb_error, u) +
1934 offsetof(struct ipr_hostrcb_type_17_error, data)));
1935 }
1936
1937 /**
1938 * ipr_log_dual_ioa_error - Log a dual adapter error.
1939 * @ioa_cfg: ioa config struct
1940 * @hostrcb: hostrcb struct
1941 *
1942 * Return value:
1943 * none
1944 **/
1945 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1946 struct ipr_hostrcb *hostrcb)
1947 {
1948 struct ipr_hostrcb_type_07_error *error;
1949
1950 error = &hostrcb->hcam.u.error.u.type_07_error;
1951 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1952 strim(error->failure_reason);
1953
1954 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1955 be32_to_cpu(hostrcb->hcam.u.error.prc));
1956 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1957 ipr_log_hex_data(ioa_cfg, error->data,
1958 be32_to_cpu(hostrcb->hcam.length) -
1959 (offsetof(struct ipr_hostrcb_error, u) +
1960 offsetof(struct ipr_hostrcb_type_07_error, data)));
1961 }
1962
1963 static const struct {
1964 u8 active;
1965 char *desc;
1966 } path_active_desc[] = {
1967 { IPR_PATH_NO_INFO, "Path" },
1968 { IPR_PATH_ACTIVE, "Active path" },
1969 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1970 };
1971
1972 static const struct {
1973 u8 state;
1974 char *desc;
1975 } path_state_desc[] = {
1976 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1977 { IPR_PATH_HEALTHY, "is healthy" },
1978 { IPR_PATH_DEGRADED, "is degraded" },
1979 { IPR_PATH_FAILED, "is failed" }
1980 };
1981
1982 /**
1983 * ipr_log_fabric_path - Log a fabric path error
1984 * @hostrcb: hostrcb struct
1985 * @fabric: fabric descriptor
1986 *
1987 * Return value:
1988 * none
1989 **/
1990 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1991 struct ipr_hostrcb_fabric_desc *fabric)
1992 {
1993 int i, j;
1994 u8 path_state = fabric->path_state;
1995 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1996 u8 state = path_state & IPR_PATH_STATE_MASK;
1997
1998 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1999 if (path_active_desc[i].active != active)
2000 continue;
2001
2002 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2003 if (path_state_desc[j].state != state)
2004 continue;
2005
2006 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2007 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2008 path_active_desc[i].desc, path_state_desc[j].desc,
2009 fabric->ioa_port);
2010 } else if (fabric->cascaded_expander == 0xff) {
2011 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2012 path_active_desc[i].desc, path_state_desc[j].desc,
2013 fabric->ioa_port, fabric->phy);
2014 } else if (fabric->phy == 0xff) {
2015 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2016 path_active_desc[i].desc, path_state_desc[j].desc,
2017 fabric->ioa_port, fabric->cascaded_expander);
2018 } else {
2019 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2020 path_active_desc[i].desc, path_state_desc[j].desc,
2021 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2022 }
2023 return;
2024 }
2025 }
2026
2027 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2028 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2029 }
2030
2031 /**
2032 * ipr_log64_fabric_path - Log a fabric path error
2033 * @hostrcb: hostrcb struct
2034 * @fabric: fabric descriptor
2035 *
2036 * Return value:
2037 * none
2038 **/
2039 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2040 struct ipr_hostrcb64_fabric_desc *fabric)
2041 {
2042 int i, j;
2043 u8 path_state = fabric->path_state;
2044 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2045 u8 state = path_state & IPR_PATH_STATE_MASK;
2046 char buffer[IPR_MAX_RES_PATH_LENGTH];
2047
2048 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2049 if (path_active_desc[i].active != active)
2050 continue;
2051
2052 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2053 if (path_state_desc[j].state != state)
2054 continue;
2055
2056 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2057 path_active_desc[i].desc, path_state_desc[j].desc,
2058 ipr_format_res_path(hostrcb->ioa_cfg,
2059 fabric->res_path,
2060 buffer, sizeof(buffer)));
2061 return;
2062 }
2063 }
2064
2065 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2066 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2067 buffer, sizeof(buffer)));
2068 }
2069
2070 static const struct {
2071 u8 type;
2072 char *desc;
2073 } path_type_desc[] = {
2074 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2075 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2076 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2077 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2078 };
2079
2080 static const struct {
2081 u8 status;
2082 char *desc;
2083 } path_status_desc[] = {
2084 { IPR_PATH_CFG_NO_PROB, "Functional" },
2085 { IPR_PATH_CFG_DEGRADED, "Degraded" },
2086 { IPR_PATH_CFG_FAILED, "Failed" },
2087 { IPR_PATH_CFG_SUSPECT, "Suspect" },
2088 { IPR_PATH_NOT_DETECTED, "Missing" },
2089 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2090 };
2091
2092 static const char *link_rate[] = {
2093 "unknown",
2094 "disabled",
2095 "phy reset problem",
2096 "spinup hold",
2097 "port selector",
2098 "unknown",
2099 "unknown",
2100 "unknown",
2101 "1.5Gbps",
2102 "3.0Gbps",
2103 "unknown",
2104 "unknown",
2105 "unknown",
2106 "unknown",
2107 "unknown",
2108 "unknown"
2109 };
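/*
 * Note (added for clarity): the table above is indexed by
 * cfg->link_rate & IPR_PHY_LINK_RATE_MASK; with 16 entries the mask is
 * expected to keep only the low four bits. The populated entries appear to
 * mirror the SAS negotiated link rate codes (0x1-0x4 for disabled, phy reset
 * problem, spinup hold and port selector, 0x8 = 1.5Gbps, 0x9 = 3.0Gbps);
 * the remaining codes are reported as "unknown".
 */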
2110
2111 /**
2112 * ipr_log_path_elem - Log a fabric path element.
2113 * @hostrcb: hostrcb struct
2114 * @cfg: fabric path element struct
2115 *
2116 * Return value:
2117 * none
2118 **/
2119 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2120 struct ipr_hostrcb_config_element *cfg)
2121 {
2122 int i, j;
2123 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2124 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2125
2126 if (type == IPR_PATH_CFG_NOT_EXIST)
2127 return;
2128
2129 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2130 if (path_type_desc[i].type != type)
2131 continue;
2132
2133 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2134 if (path_status_desc[j].status != status)
2135 continue;
2136
2137 if (type == IPR_PATH_CFG_IOA_PORT) {
2138 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2139 path_status_desc[j].desc, path_type_desc[i].desc,
2140 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2141 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2142 } else {
2143 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2144 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2145 path_status_desc[j].desc, path_type_desc[i].desc,
2146 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2147 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2148 } else if (cfg->cascaded_expander == 0xff) {
2149 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2150 "WWN=%08X%08X\n", path_status_desc[j].desc,
2151 path_type_desc[i].desc, cfg->phy,
2152 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2153 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2154 } else if (cfg->phy == 0xff) {
2155 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2156 "WWN=%08X%08X\n", path_status_desc[j].desc,
2157 path_type_desc[i].desc, cfg->cascaded_expander,
2158 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2159 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2160 } else {
2161 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2162 "WWN=%08X%08X\n", path_status_desc[j].desc,
2163 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2164 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2165 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2166 }
2167 }
2168 return;
2169 }
2170 }
2171
2172 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2173 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2174 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2175 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2176 }
2177
2178 /**
2179 * ipr_log64_path_elem - Log a fabric path element.
2180 * @hostrcb: hostrcb struct
2181 * @cfg: fabric path element struct
2182 *
2183 * Return value:
2184 * none
2185 **/
2186 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2187 struct ipr_hostrcb64_config_element *cfg)
2188 {
2189 int i, j;
2190 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2191 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2192 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2193 char buffer[IPR_MAX_RES_PATH_LENGTH];
2194
2195 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2196 return;
2197
2198 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2199 if (path_type_desc[i].type != type)
2200 continue;
2201
2202 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2203 if (path_status_desc[j].status != status)
2204 continue;
2205
2206 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2207 path_status_desc[j].desc, path_type_desc[i].desc,
2208 ipr_format_res_path(hostrcb->ioa_cfg,
2209 cfg->res_path, buffer, sizeof(buffer)),
2210 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2211 be32_to_cpu(cfg->wwid[0]),
2212 be32_to_cpu(cfg->wwid[1]));
2213 return;
2214 }
2215 }
2216 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2217 "WWN=%08X%08X\n", cfg->type_status,
2218 ipr_format_res_path(hostrcb->ioa_cfg,
2219 cfg->res_path, buffer, sizeof(buffer)),
2220 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2221 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2222 }
2223
2224 /**
2225 * ipr_log_fabric_error - Log a fabric error.
2226 * @ioa_cfg: ioa config struct
2227 * @hostrcb: hostrcb struct
2228 *
2229 * Return value:
2230 * none
2231 **/
2232 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2233 struct ipr_hostrcb *hostrcb)
2234 {
2235 struct ipr_hostrcb_type_20_error *error;
2236 struct ipr_hostrcb_fabric_desc *fabric;
2237 struct ipr_hostrcb_config_element *cfg;
2238 int i, add_len;
2239
2240 error = &hostrcb->hcam.u.error.u.type_20_error;
2241 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2242 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2243
2244 add_len = be32_to_cpu(hostrcb->hcam.length) -
2245 (offsetof(struct ipr_hostrcb_error, u) +
2246 offsetof(struct ipr_hostrcb_type_20_error, desc));
2247
2248 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2249 ipr_log_fabric_path(hostrcb, fabric);
2250 for_each_fabric_cfg(fabric, cfg)
2251 ipr_log_path_elem(hostrcb, cfg);
2252
2253 add_len -= be16_to_cpu(fabric->length);
2254 fabric = (struct ipr_hostrcb_fabric_desc *)
2255 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2256 }
2257
2258 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2259 }
2260
2261 /**
2262 * ipr_log_sis64_array_error - Log a sis64 array error.
2263 * @ioa_cfg: ioa config struct
2264 * @hostrcb: hostrcb struct
2265 *
2266 * Return value:
2267 * none
2268 **/
2269 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2270 struct ipr_hostrcb *hostrcb)
2271 {
2272 int i, num_entries;
2273 struct ipr_hostrcb_type_24_error *error;
2274 struct ipr_hostrcb64_array_data_entry *array_entry;
2275 char buffer[IPR_MAX_RES_PATH_LENGTH];
2276 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2277
2278 error = &hostrcb->hcam.u.error64.u.type_24_error;
2279
2280 ipr_err_separator;
2281
2282 ipr_err("RAID %s Array Configuration: %s\n",
2283 error->protection_level,
2284 ipr_format_res_path(ioa_cfg, error->last_res_path,
2285 buffer, sizeof(buffer)));
2286
2287 ipr_err_separator;
2288
2289 array_entry = error->array_member;
2290 num_entries = min_t(u32, error->num_entries,
2291 ARRAY_SIZE(error->array_member));
2292
2293 for (i = 0; i < num_entries; i++, array_entry++) {
2294
2295 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2296 continue;
2297
2298 if (error->exposed_mode_adn == i)
2299 ipr_err("Exposed Array Member %d:\n", i);
2300 else
2301 ipr_err("Array Member %d:\n", i);
2302
2303 ipr_err("Array Member %d:\n", i);
2304 ipr_log_ext_vpd(&array_entry->vpd);
2305 ipr_err("Current Location: %s\n",
2306 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2307 buffer, sizeof(buffer)));
2308 ipr_err("Expected Location: %s\n",
2309 ipr_format_res_path(ioa_cfg,
2310 array_entry->expected_res_path,
2311 buffer, sizeof(buffer)));
2312
2313 ipr_err_separator;
2314 }
2315 }
2316
2317 /**
2318 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2319 * @ioa_cfg: ioa config struct
2320 * @hostrcb: hostrcb struct
2321 *
2322 * Return value:
2323 * none
2324 **/
2325 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2326 struct ipr_hostrcb *hostrcb)
2327 {
2328 struct ipr_hostrcb_type_30_error *error;
2329 struct ipr_hostrcb64_fabric_desc *fabric;
2330 struct ipr_hostrcb64_config_element *cfg;
2331 int i, add_len;
2332
2333 error = &hostrcb->hcam.u.error64.u.type_30_error;
2334
2335 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2336 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2337
2338 add_len = be32_to_cpu(hostrcb->hcam.length) -
2339 (offsetof(struct ipr_hostrcb64_error, u) +
2340 offsetof(struct ipr_hostrcb_type_30_error, desc));
2341
2342 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2343 ipr_log64_fabric_path(hostrcb, fabric);
2344 for_each_fabric_cfg(fabric, cfg)
2345 ipr_log64_path_elem(hostrcb, cfg);
2346
2347 add_len -= be16_to_cpu(fabric->length);
2348 fabric = (struct ipr_hostrcb64_fabric_desc *)
2349 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2350 }
2351
2352 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2353 }
2354
2355 /**
2356 * ipr_log_generic_error - Log an adapter error.
2357 * @ioa_cfg: ioa config struct
2358 * @hostrcb: hostrcb struct
2359 *
2360 * Return value:
2361 * none
2362 **/
2363 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2364 struct ipr_hostrcb *hostrcb)
2365 {
2366 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2367 be32_to_cpu(hostrcb->hcam.length));
2368 }
2369
2370 /**
2371 * ipr_log_sis64_device_error - Log a sis64 device error.
2372 * @ioa_cfg: ioa config struct
2373 * @hostrcb: hostrcb struct
2374 *
2375 * Return value:
2376 * none
2377 **/
2378 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2379 struct ipr_hostrcb *hostrcb)
2380 {
2381 struct ipr_hostrcb_type_21_error *error;
2382 char buffer[IPR_MAX_RES_PATH_LENGTH];
2383
2384 error = &hostrcb->hcam.u.error64.u.type_21_error;
2385
2386 ipr_err("-----Failing Device Information-----\n");
2387 ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2388 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2389 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2390 ipr_err("Device Resource Path: %s\n",
2391 __ipr_format_res_path(error->res_path,
2392 buffer, sizeof(buffer)));
2393 error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2394 error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2395 ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2396 ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
2397 ipr_err("SCSI Sense Data:\n");
2398 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2399 ipr_err("SCSI Command Descriptor Block: \n");
2400 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2401
2402 ipr_err("Additional IOA Data:\n");
2403 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2404 }
2405
2406 /**
2407 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2408 * @ioasc: IOASC
2409 *
2410 * This function will return the index into the ipr_error_table
2411 * for the specified IOASC. If the IOASC is not in the table,
2412 * 0 will be returned, which points to the entry used for unknown errors.
2413 *
2414 * Return value:
2415 * index into the ipr_error_table
2416 **/
2417 static u32 ipr_get_error(u32 ioasc)
2418 {
2419 int i;
2420
2421 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2422 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2423 return i;
2424
2425 return 0;
2426 }
2427
2428 /**
2429 * ipr_handle_log_data - Log an adapter error.
2430 * @ioa_cfg: ioa config struct
2431 * @hostrcb: hostrcb struct
2432 *
2433 * This function logs an adapter error to the system.
2434 *
2435 * Return value:
2436 * none
2437 **/
2438 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2439 struct ipr_hostrcb *hostrcb)
2440 {
2441 u32 ioasc;
2442 int error_index;
2443
2444 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2445 return;
2446
2447 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2448 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2449
2450 if (ioa_cfg->sis64)
2451 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2452 else
2453 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2454
2455 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2456 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2457 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2458 scsi_report_bus_reset(ioa_cfg->host,
2459 hostrcb->hcam.u.error.fd_res_addr.bus);
2460 }
2461
2462 error_index = ipr_get_error(ioasc);
2463
2464 if (!ipr_error_table[error_index].log_hcam)
2465 return;
2466
2467 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2468
2469 /* Set indication we have logged an error */
2470 ioa_cfg->errors_logged++;
2471
2472 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2473 return;
2474 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2475 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2476
2477 switch (hostrcb->hcam.overlay_id) {
2478 case IPR_HOST_RCB_OVERLAY_ID_2:
2479 ipr_log_cache_error(ioa_cfg, hostrcb);
2480 break;
2481 case IPR_HOST_RCB_OVERLAY_ID_3:
2482 ipr_log_config_error(ioa_cfg, hostrcb);
2483 break;
2484 case IPR_HOST_RCB_OVERLAY_ID_4:
2485 case IPR_HOST_RCB_OVERLAY_ID_6:
2486 ipr_log_array_error(ioa_cfg, hostrcb);
2487 break;
2488 case IPR_HOST_RCB_OVERLAY_ID_7:
2489 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2490 break;
2491 case IPR_HOST_RCB_OVERLAY_ID_12:
2492 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2493 break;
2494 case IPR_HOST_RCB_OVERLAY_ID_13:
2495 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2496 break;
2497 case IPR_HOST_RCB_OVERLAY_ID_14:
2498 case IPR_HOST_RCB_OVERLAY_ID_16:
2499 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2500 break;
2501 case IPR_HOST_RCB_OVERLAY_ID_17:
2502 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2503 break;
2504 case IPR_HOST_RCB_OVERLAY_ID_20:
2505 ipr_log_fabric_error(ioa_cfg, hostrcb);
2506 break;
2507 case IPR_HOST_RCB_OVERLAY_ID_21:
2508 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2509 break;
2510 case IPR_HOST_RCB_OVERLAY_ID_23:
2511 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2512 break;
2513 case IPR_HOST_RCB_OVERLAY_ID_24:
2514 case IPR_HOST_RCB_OVERLAY_ID_26:
2515 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2516 break;
2517 case IPR_HOST_RCB_OVERLAY_ID_30:
2518 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2519 break;
2520 case IPR_HOST_RCB_OVERLAY_ID_1:
2521 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2522 default:
2523 ipr_log_generic_error(ioa_cfg, hostrcb);
2524 break;
2525 }
2526 }
2527
2528 /**
2529 * ipr_process_error - Op done function for an adapter error log.
2530 * @ipr_cmd: ipr command struct
2531 *
2532 * This function is the op done function for an error log host
2533 * controlled async message (HCAM) from the adapter. It will log the error and
2534 * send the HCAM back to the adapter.
2535 *
2536 * Return value:
2537 * none
2538 **/
2539 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2540 {
2541 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2542 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2543 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2544 u32 fd_ioasc;
2545
2546 if (ioa_cfg->sis64)
2547 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2548 else
2549 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2550
2551 list_del(&hostrcb->queue);
2552 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2553
2554 if (!ioasc) {
2555 ipr_handle_log_data(ioa_cfg, hostrcb);
2556 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2557 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2558 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2559 dev_err(&ioa_cfg->pdev->dev,
2560 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2561 }
2562
2563 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2564 }
2565
2566 /**
2567 * ipr_timeout - An internally generated op has timed out.
2568 * @ipr_cmd: ipr command struct
2569 *
2570 * This function blocks host requests and initiates an
2571 * adapter reset.
2572 *
2573 * Return value:
2574 * none
2575 **/
2576 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2577 {
2578 unsigned long lock_flags = 0;
2579 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2580
2581 ENTER;
2582 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2583
2584 ioa_cfg->errors_logged++;
2585 dev_err(&ioa_cfg->pdev->dev,
2586 "Adapter being reset due to command timeout.\n");
2587
2588 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2589 ioa_cfg->sdt_state = GET_DUMP;
2590
2591 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2592 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2593
2594 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2595 LEAVE;
2596 }
2597
2598 /**
2599 * ipr_oper_timeout - Adapter timed out transitioning to operational
2600 * @ipr_cmd: ipr command struct
2601 *
2602 * This function blocks host requests and initiates an
2603 * adapter reset.
2604 *
2605 * Return value:
2606 * none
2607 **/
2608 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2609 {
2610 unsigned long lock_flags = 0;
2611 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2612
2613 ENTER;
2614 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2615
2616 ioa_cfg->errors_logged++;
2617 dev_err(&ioa_cfg->pdev->dev,
2618 "Adapter timed out transitioning to operational.\n");
2619
2620 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2621 ioa_cfg->sdt_state = GET_DUMP;
2622
2623 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2624 if (ipr_fastfail)
2625 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2626 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2627 }
2628
2629 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2630 LEAVE;
2631 }
2632
2633 /**
2634 * ipr_find_ses_entry - Find matching SES in SES table
2635 * @res: resource entry struct of SES
2636 *
2637 * Return value:
2638 * pointer to SES table entry / NULL on failure
2639 **/
2640 static const struct ipr_ses_table_entry *
2641 ipr_find_ses_entry(struct ipr_resource_entry *res)
2642 {
2643 int i, j, matches;
2644 struct ipr_std_inq_vpids *vpids;
2645 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2646
2647 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2648 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2649 if (ste->compare_product_id_byte[j] == 'X') {
2650 vpids = &res->std_inq_data.vpids;
2651 if (vpids->product_id[j] == ste->product_id[j])
2652 matches++;
2653 else
2654 break;
2655 } else
2656 matches++;
2657 }
2658
2659 if (matches == IPR_PROD_ID_LEN)
2660 return ste;
2661 }
2662
2663 return NULL;
2664 }
2665
2666 /**
2667 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2668 * @ioa_cfg: ioa config struct
2669 * @bus: SCSI bus
2670 * @bus_width: bus width
2671 *
2672 * Return value:
2673 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2674 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2675 * twice the maximum transfer rate (e.g. for a wide enabled bus,
2676 * max 160MHz = max 320MB/sec).
2677 **/
2678 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2679 {
2680 struct ipr_resource_entry *res;
2681 const struct ipr_ses_table_entry *ste;
2682 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2683
2684 /* Loop through each config table entry in the config table buffer */
2685 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2686 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2687 continue;
2688
2689 if (bus != res->bus)
2690 continue;
2691
2692 if (!(ste = ipr_find_ses_entry(res)))
2693 continue;
2694
2695 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2696 }
2697
2698 return max_xfer_rate;
2699 }
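/*
 * Worked example (illustrative, assuming ste->max_bus_speed_limit is in
 * MB/sec as the formula above implies): a matching SES entry limiting a
 * 16-bit wide bus to 160 MB/sec yields (160 * 10) / (16 / 8) = 800, i.e.
 * 80 MHz in the 100KHz units described in the function header.
 */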
2700
2701 /**
2702 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2703 * @ioa_cfg: ioa config struct
2704 * @max_delay: max delay in micro-seconds to wait
2705 *
2706 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2707 *
2708 * Return value:
2709 * 0 on success / other on failure
2710 **/
2711 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2712 {
2713 volatile u32 pcii_reg;
2714 int delay = 1;
2715
2716 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2717 while (delay < max_delay) {
2718 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2719
2720 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2721 return 0;
2722
2723 /* udelay cannot be used if delay is more than a few milliseconds */
2724 if ((delay / 1000) > MAX_UDELAY_MS)
2725 mdelay(delay / 1000);
2726 else
2727 udelay(delay);
2728
2729 delay += delay;
2730 }
2731 return -EIO;
2732 }
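/*
 * Note (added for clarity): the per-iteration delay above doubles each pass
 * (1, 2, 4, ... microseconds), so the total busy-wait before returning -EIO
 * is bounded by roughly twice max_delay.
 */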
2733
2734 /**
2735 * ipr_get_sis64_dump_data_section - Dump IOA memory
2736 * @ioa_cfg: ioa config struct
2737 * @start_addr: adapter address to dump
2738 * @dest: destination kernel buffer
2739 * @length_in_words: length to dump in 4 byte words
2740 *
2741 * Return value:
2742 * 0 on success
2743 **/
2744 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2745 u32 start_addr,
2746 __be32 *dest, u32 length_in_words)
2747 {
2748 int i;
2749
2750 for (i = 0; i < length_in_words; i++) {
2751 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2752 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2753 dest++;
2754 }
2755
2756 return 0;
2757 }
2758
2759 /**
2760 * ipr_get_ldump_data_section - Dump IOA memory
2761 * @ioa_cfg: ioa config struct
2762 * @start_addr: adapter address to dump
2763 * @dest: destination kernel buffer
2764 * @length_in_words: length to dump in 4 byte words
2765 *
2766 * Return value:
2767 * 0 on success / -EIO on failure
2768 **/
2769 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2770 u32 start_addr,
2771 __be32 *dest, u32 length_in_words)
2772 {
2773 volatile u32 temp_pcii_reg;
2774 int i, delay = 0;
2775
2776 if (ioa_cfg->sis64)
2777 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2778 dest, length_in_words);
2779
2780 /* Write IOA interrupt reg starting LDUMP state */
2781 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2782 ioa_cfg->regs.set_uproc_interrupt_reg32);
2783
2784 /* Wait for IO debug acknowledge */
2785 if (ipr_wait_iodbg_ack(ioa_cfg,
2786 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2787 dev_err(&ioa_cfg->pdev->dev,
2788 "IOA dump long data transfer timeout\n");
2789 return -EIO;
2790 }
2791
2792 /* Signal LDUMP interlocked - clear IO debug ack */
2793 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2794 ioa_cfg->regs.clr_interrupt_reg);
2795
2796 /* Write Mailbox with starting address */
2797 writel(start_addr, ioa_cfg->ioa_mailbox);
2798
2799 /* Signal address valid - clear IOA Reset alert */
2800 writel(IPR_UPROCI_RESET_ALERT,
2801 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2802
2803 for (i = 0; i < length_in_words; i++) {
2804 /* Wait for IO debug acknowledge */
2805 if (ipr_wait_iodbg_ack(ioa_cfg,
2806 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2807 dev_err(&ioa_cfg->pdev->dev,
2808 "IOA dump short data transfer timeout\n");
2809 return -EIO;
2810 }
2811
2812 /* Read data from mailbox and increment destination pointer */
2813 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2814 dest++;
2815
2816 /* For all but the last word of data, signal data received */
2817 if (i < (length_in_words - 1)) {
2818 /* Signal dump data received - Clear IO debug Ack */
2819 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2820 ioa_cfg->regs.clr_interrupt_reg);
2821 }
2822 }
2823
2824 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2825 writel(IPR_UPROCI_RESET_ALERT,
2826 ioa_cfg->regs.set_uproc_interrupt_reg32);
2827
2828 writel(IPR_UPROCI_IO_DEBUG_ALERT,
2829 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2830
2831 /* Signal dump data received - Clear IO debug Ack */
2832 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2833 ioa_cfg->regs.clr_interrupt_reg);
2834
2835 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2836 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2837 temp_pcii_reg =
2838 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2839
2840 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2841 return 0;
2842
2843 udelay(10);
2844 delay += 10;
2845 }
2846
2847 return 0;
2848 }
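/*
 * Summary of the LDUMP handshake implemented above (added for clarity):
 * the driver raises RESET_ALERT plus IO_DEBUG_ALERT, waits for the IOA to
 * acknowledge, writes the starting address to the mailbox and drops
 * RESET_ALERT. Each word is then read from the mailbox after an IO debug
 * acknowledge, with the ack cleared between words. The transfer ends by
 * re-raising RESET_ALERT, clearing IO_DEBUG_ALERT and the final ack, and
 * polling until the IOA drops RESET_ALERT.
 */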
2849
2850 #ifdef CONFIG_SCSI_IPR_DUMP
2851 /**
2852 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2853 * @ioa_cfg: ioa config struct
2854 * @pci_address: adapter address
2855 * @length: length of data to copy
2856 *
2857 * Copy data from PCI adapter to kernel buffer.
2858 * Note: length MUST be a 4 byte multiple
2859 * Return value:
2860 * 0 on success / other on failure
2861 **/
2862 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2863 unsigned long pci_address, u32 length)
2864 {
2865 int bytes_copied = 0;
2866 int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2867 __be32 *page;
2868 unsigned long lock_flags = 0;
2869 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2870
2871 if (ioa_cfg->sis64)
2872 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2873 else
2874 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2875
2876 while (bytes_copied < length &&
2877 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2878 if (ioa_dump->page_offset >= PAGE_SIZE ||
2879 ioa_dump->page_offset == 0) {
2880 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2881
2882 if (!page) {
2883 ipr_trace;
2884 return bytes_copied;
2885 }
2886
2887 ioa_dump->page_offset = 0;
2888 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2889 ioa_dump->next_page_index++;
2890 } else
2891 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2892
2893 rem_len = length - bytes_copied;
2894 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2895 cur_len = min(rem_len, rem_page_len);
2896
2897 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2898 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2899 rc = -EIO;
2900 } else {
2901 rc = ipr_get_ldump_data_section(ioa_cfg,
2902 pci_address + bytes_copied,
2903 &page[ioa_dump->page_offset / 4],
2904 (cur_len / sizeof(u32)));
2905 }
2906 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2907
2908 if (!rc) {
2909 ioa_dump->page_offset += cur_len;
2910 bytes_copied += cur_len;
2911 } else {
2912 ipr_trace;
2913 break;
2914 }
2915 schedule();
2916 }
2917
2918 return bytes_copied;
2919 }
2920
2921 /**
2922 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2923 * @hdr: dump entry header struct
2924 *
2925 * Return value:
2926 * nothing
2927 **/
2928 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2929 {
2930 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2931 hdr->num_elems = 1;
2932 hdr->offset = sizeof(*hdr);
2933 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2934 }
2935
2936 /**
2937 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2938 * @ioa_cfg: ioa config struct
2939 * @driver_dump: driver dump struct
2940 *
2941 * Return value:
2942 * nothing
2943 **/
2944 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2945 struct ipr_driver_dump *driver_dump)
2946 {
2947 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2948
2949 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2950 driver_dump->ioa_type_entry.hdr.len =
2951 sizeof(struct ipr_dump_ioa_type_entry) -
2952 sizeof(struct ipr_dump_entry_header);
2953 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2954 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2955 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2956 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2957 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2958 ucode_vpd->minor_release[1];
2959 driver_dump->hdr.num_entries++;
2960 }
2961
2962 /**
2963 * ipr_dump_version_data - Fill in the driver version in the dump.
2964 * @ioa_cfg: ioa config struct
2965 * @driver_dump: driver dump struct
2966 *
2967 * Return value:
2968 * nothing
2969 **/
2970 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2971 struct ipr_driver_dump *driver_dump)
2972 {
2973 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2974 driver_dump->version_entry.hdr.len =
2975 sizeof(struct ipr_dump_version_entry) -
2976 sizeof(struct ipr_dump_entry_header);
2977 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2978 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2979 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2980 driver_dump->hdr.num_entries++;
2981 }
2982
2983 /**
2984 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2985 * @ioa_cfg: ioa config struct
2986 * @driver_dump: driver dump struct
2987 *
2988 * Return value:
2989 * nothing
2990 **/
2991 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2992 struct ipr_driver_dump *driver_dump)
2993 {
2994 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2995 driver_dump->trace_entry.hdr.len =
2996 sizeof(struct ipr_dump_trace_entry) -
2997 sizeof(struct ipr_dump_entry_header);
2998 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2999 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3000 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3001 driver_dump->hdr.num_entries++;
3002 }
3003
3004 /**
3005 * ipr_dump_location_data - Fill in the IOA location in the dump.
3006 * @ioa_cfg: ioa config struct
3007 * @driver_dump: driver dump struct
3008 *
3009 * Return value:
3010 * nothing
3011 **/
3012 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3013 struct ipr_driver_dump *driver_dump)
3014 {
3015 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3016 driver_dump->location_entry.hdr.len =
3017 sizeof(struct ipr_dump_location_entry) -
3018 sizeof(struct ipr_dump_entry_header);
3019 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3020 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3021 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3022 driver_dump->hdr.num_entries++;
3023 }
3024
3025 /**
3026 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3027 * @ioa_cfg: ioa config struct
3028 * @dump: dump struct
3029 *
3030 * Return value:
3031 * nothing
3032 **/
3033 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3034 {
3035 unsigned long start_addr, sdt_word;
3036 unsigned long lock_flags = 0;
3037 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3038 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3039 u32 num_entries, max_num_entries, start_off, end_off;
3040 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3041 struct ipr_sdt *sdt;
3042 int valid = 1;
3043 int i;
3044
3045 ENTER;
3046
3047 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3048
3049 if (ioa_cfg->sdt_state != READ_DUMP) {
3050 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3051 return;
3052 }
3053
3054 if (ioa_cfg->sis64) {
3055 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3056 ssleep(IPR_DUMP_DELAY_SECONDS);
3057 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3058 }
3059
3060 start_addr = readl(ioa_cfg->ioa_mailbox);
3061
3062 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3063 dev_err(&ioa_cfg->pdev->dev,
3064 "Invalid dump table format: %lx\n", start_addr);
3065 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3066 return;
3067 }
3068
3069 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3070
3071 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3072
3073 /* Initialize the overall dump header */
3074 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3075 driver_dump->hdr.num_entries = 1;
3076 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3077 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3078 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3079 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3080
3081 ipr_dump_version_data(ioa_cfg, driver_dump);
3082 ipr_dump_location_data(ioa_cfg, driver_dump);
3083 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3084 ipr_dump_trace_data(ioa_cfg, driver_dump);
3085
3086 /* Update dump_header */
3087 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3088
3089 /* IOA Dump entry */
3090 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3091 ioa_dump->hdr.len = 0;
3092 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3093 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3094
3095 /* First entries in sdt are actually a list of dump addresses and
3096 lengths to gather the real dump data. sdt represents the pointer
3097 to the ioa generated dump table. Dump data will be extracted based
3098 on entries in this table */
3099 sdt = &ioa_dump->sdt;
3100
3101 if (ioa_cfg->sis64) {
3102 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3103 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3104 } else {
3105 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3106 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3107 }
3108
3109 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3110 (max_num_entries * sizeof(struct ipr_sdt_entry));
3111 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3112 bytes_to_copy / sizeof(__be32));
3113
3114 /* Smart Dump table is ready to use and the first entry is valid */
3115 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3116 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3117 dev_err(&ioa_cfg->pdev->dev,
3118 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3119 rc, be32_to_cpu(sdt->hdr.state));
3120 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3121 ioa_cfg->sdt_state = DUMP_OBTAINED;
3122 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3123 return;
3124 }
3125
3126 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3127
3128 if (num_entries > max_num_entries)
3129 num_entries = max_num_entries;
3130
3131 /* Update dump length to the actual data to be copied */
3132 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3133 if (ioa_cfg->sis64)
3134 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3135 else
3136 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3137
3138 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3139
3140 for (i = 0; i < num_entries; i++) {
3141 if (ioa_dump->hdr.len > max_dump_size) {
3142 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3143 break;
3144 }
3145
3146 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3147 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3148 if (ioa_cfg->sis64)
3149 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3150 else {
3151 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3152 end_off = be32_to_cpu(sdt->entry[i].end_token);
3153
3154 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3155 bytes_to_copy = end_off - start_off;
3156 else
3157 valid = 0;
3158 }
3159 if (valid) {
3160 if (bytes_to_copy > max_dump_size) {
3161 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3162 continue;
3163 }
3164
3165 /* Copy data from adapter to driver buffers */
3166 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3167 bytes_to_copy);
3168
3169 ioa_dump->hdr.len += bytes_copied;
3170
3171 if (bytes_copied != bytes_to_copy) {
3172 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3173 break;
3174 }
3175 }
3176 }
3177 }
3178
3179 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3180
3181 /* Update dump_header */
3182 driver_dump->hdr.len += ioa_dump->hdr.len;
3183 wmb();
3184 ioa_cfg->sdt_state = DUMP_OBTAINED;
3185 LEAVE;
3186 }
3187
3188 #else
3189 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3190 #endif
3191
3192 /**
3193 * ipr_release_dump - Free adapter dump memory
3194 * @kref: kref struct
3195 *
3196 * Return value:
3197 * nothing
3198 **/
3199 static void ipr_release_dump(struct kref *kref)
3200 {
3201 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3202 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3203 unsigned long lock_flags = 0;
3204 int i;
3205
3206 ENTER;
3207 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3208 ioa_cfg->dump = NULL;
3209 ioa_cfg->sdt_state = INACTIVE;
3210 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3211
3212 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3213 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3214
3215 vfree(dump->ioa_dump.ioa_data);
3216 kfree(dump);
3217 LEAVE;
3218 }
3219
3220 /**
3221 * ipr_worker_thread - Worker thread
3222 * @work: work struct (embedded in the ioa config struct)
3223 *
3224 * Called at task level from a work thread. This function takes care
3225 * of adding and removing device from the mid-layer as configuration
3226 * changes are detected by the adapter.
3227 *
3228 * Return value:
3229 * nothing
3230 **/
3231 static void ipr_worker_thread(struct work_struct *work)
3232 {
3233 unsigned long lock_flags;
3234 struct ipr_resource_entry *res;
3235 struct scsi_device *sdev;
3236 struct ipr_dump *dump;
3237 struct ipr_ioa_cfg *ioa_cfg =
3238 container_of(work, struct ipr_ioa_cfg, work_q);
3239 u8 bus, target, lun;
3240 int did_work;
3241
3242 ENTER;
3243 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3244
3245 if (ioa_cfg->sdt_state == READ_DUMP) {
3246 dump = ioa_cfg->dump;
3247 if (!dump) {
3248 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3249 return;
3250 }
3251 kref_get(&dump->kref);
3252 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3253 ipr_get_ioa_dump(ioa_cfg, dump);
3254 kref_put(&dump->kref, ipr_release_dump);
3255
3256 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3257 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3258 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3259 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3260 return;
3261 }
3262
3263 restart:
3264 do {
3265 did_work = 0;
3266 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
3267 !ioa_cfg->allow_ml_add_del) {
3268 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3269 return;
3270 }
3271
3272 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3273 if (res->del_from_ml && res->sdev) {
3274 did_work = 1;
3275 sdev = res->sdev;
3276 if (!scsi_device_get(sdev)) {
3277 if (!res->add_to_ml)
3278 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3279 else
3280 res->del_from_ml = 0;
3281 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3282 scsi_remove_device(sdev);
3283 scsi_device_put(sdev);
3284 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3285 }
3286 break;
3287 }
3288 }
3289 } while (did_work);
3290
3291 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3292 if (res->add_to_ml) {
3293 bus = res->bus;
3294 target = res->target;
3295 lun = res->lun;
3296 res->add_to_ml = 0;
3297 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3298 scsi_add_device(ioa_cfg->host, bus, target, lun);
3299 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3300 goto restart;
3301 }
3302 }
3303
3304 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3305 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3306 LEAVE;
3307 }
3308
3309 #ifdef CONFIG_SCSI_IPR_TRACE
3310 /**
3311 * ipr_read_trace - Dump the adapter trace
3312 * @filp: open sysfs file
3313 * @kobj: kobject struct
3314 * @bin_attr: bin_attribute struct
3315 * @buf: buffer
3316 * @off: offset
3317 * @count: buffer size
3318 *
3319 * Return value:
3320 * number of bytes printed to buffer
3321 **/
3322 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3323 struct bin_attribute *bin_attr,
3324 char *buf, loff_t off, size_t count)
3325 {
3326 struct device *dev = container_of(kobj, struct device, kobj);
3327 struct Scsi_Host *shost = class_to_shost(dev);
3328 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3329 unsigned long lock_flags = 0;
3330 ssize_t ret;
3331
3332 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3333 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3334 IPR_TRACE_SIZE);
3335 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3336
3337 return ret;
3338 }
3339
3340 static struct bin_attribute ipr_trace_attr = {
3341 .attr = {
3342 .name = "trace",
3343 .mode = S_IRUGO,
3344 },
3345 .size = 0,
3346 .read = ipr_read_trace,
3347 };
3348 #endif
3349
3350 /**
3351 * ipr_show_fw_version - Show the firmware version
3352 * @dev: class device struct
3353 * @buf: buffer
3354 *
3355 * Return value:
3356 * number of bytes printed to buffer
3357 **/
3358 static ssize_t ipr_show_fw_version(struct device *dev,
3359 struct device_attribute *attr, char *buf)
3360 {
3361 struct Scsi_Host *shost = class_to_shost(dev);
3362 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3363 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3364 unsigned long lock_flags = 0;
3365 int len;
3366
3367 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3368 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3369 ucode_vpd->major_release, ucode_vpd->card_type,
3370 ucode_vpd->minor_release[0],
3371 ucode_vpd->minor_release[1]);
3372 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3373 return len;
3374 }
3375
3376 static struct device_attribute ipr_fw_version_attr = {
3377 .attr = {
3378 .name = "fw_version",
3379 .mode = S_IRUGO,
3380 },
3381 .show = ipr_show_fw_version,
3382 };
3383
3384 /**
3385 * ipr_show_log_level - Show the adapter's error logging level
3386 * @dev: class device struct
3387 * @buf: buffer
3388 *
3389 * Return value:
3390 * number of bytes printed to buffer
3391 **/
3392 static ssize_t ipr_show_log_level(struct device *dev,
3393 struct device_attribute *attr, char *buf)
3394 {
3395 struct Scsi_Host *shost = class_to_shost(dev);
3396 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3397 unsigned long lock_flags = 0;
3398 int len;
3399
3400 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3401 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3402 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3403 return len;
3404 }
3405
3406 /**
3407 * ipr_store_log_level - Change the adapter's error logging level
3408 * @dev: class device struct
3409 * @buf: buffer
3410 *
3411 * Return value:
3412 * number of bytes consumed from the buffer
3413 **/
3414 static ssize_t ipr_store_log_level(struct device *dev,
3415 struct device_attribute *attr,
3416 const char *buf, size_t count)
3417 {
3418 struct Scsi_Host *shost = class_to_shost(dev);
3419 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3420 unsigned long lock_flags = 0;
3421
3422 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3423 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3424 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3425 return strlen(buf);
3426 }
3427
3428 static struct device_attribute ipr_log_level_attr = {
3429 .attr = {
3430 .name = "log_level",
3431 .mode = S_IRUGO | S_IWUSR,
3432 },
3433 .show = ipr_show_log_level,
3434 .store = ipr_store_log_level
3435 };
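/*
 * Illustrative usage (not part of the original source): log_level is a plain
 * read/write decimal value. Assuming the Scsi_Host sysfs directory for
 * host 0, the level could be inspected and raised with:
 *
 *   # cat /sys/class/scsi_host/host0/log_level
 *   # echo 4 > /sys/class/scsi_host/host0/log_level
 */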
3436
3437 /**
3438 * ipr_store_diagnostics - IOA Diagnostics interface
3439 * @dev: device struct
3440 * @buf: buffer
3441 * @count: buffer size
3442 *
3443 * This function will reset the adapter and wait a reasonable
3444 * amount of time for any errors that the adapter might log.
3445 *
3446 * Return value:
3447 * count on success / other on failure
3448 **/
3449 static ssize_t ipr_store_diagnostics(struct device *dev,
3450 struct device_attribute *attr,
3451 const char *buf, size_t count)
3452 {
3453 struct Scsi_Host *shost = class_to_shost(dev);
3454 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3455 unsigned long lock_flags = 0;
3456 int rc = count;
3457
3458 if (!capable(CAP_SYS_ADMIN))
3459 return -EACCES;
3460
3461 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3462 while (ioa_cfg->in_reset_reload) {
3463 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3464 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3465 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3466 }
3467
3468 ioa_cfg->errors_logged = 0;
3469 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3470
3471 if (ioa_cfg->in_reset_reload) {
3472 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3473 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3474
3475 /* Wait for a second for any errors to be logged */
3476 msleep(1000);
3477 } else {
3478 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3479 return -EIO;
3480 }
3481
3482 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3483 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3484 rc = -EIO;
3485 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3486
3487 return rc;
3488 }
3489
3490 static struct device_attribute ipr_diagnostics_attr = {
3491 .attr = {
3492 .name = "run_diagnostics",
3493 .mode = S_IWUSR,
3494 },
3495 .store = ipr_store_diagnostics
3496 };
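/*
 * Illustrative usage (not part of the original source): run_diagnostics is
 * write-only and root-only. Writing any value resets the adapter, and the
 * write fails with EIO if the adapter logged errors during the reset, e.g.
 * assuming host 0:
 *
 *   # echo 1 > /sys/class/scsi_host/host0/run_diagnostics
 */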
3497
3498 /**
3499 * ipr_show_adapter_state - Show the adapter's state
3500 * @dev: device struct
3501 * @buf: buffer
3502 *
3503 * Return value:
3504 * number of bytes printed to buffer
3505 **/
3506 static ssize_t ipr_show_adapter_state(struct device *dev,
3507 struct device_attribute *attr, char *buf)
3508 {
3509 struct Scsi_Host *shost = class_to_shost(dev);
3510 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3511 unsigned long lock_flags = 0;
3512 int len;
3513
3514 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3515 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3516 len = snprintf(buf, PAGE_SIZE, "offline\n");
3517 else
3518 len = snprintf(buf, PAGE_SIZE, "online\n");
3519 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3520 return len;
3521 }
3522
3523 /**
3524 * ipr_store_adapter_state - Change adapter state
3525 * @dev: device struct
3526 * @buf: buffer
3527 * @count: buffer size
3528 *
3529 * This function will change the adapter's state.
3530 *
3531 * Return value:
3532 * count on success / other on failure
3533 **/
3534 static ssize_t ipr_store_adapter_state(struct device *dev,
3535 struct device_attribute *attr,
3536 const char *buf, size_t count)
3537 {
3538 struct Scsi_Host *shost = class_to_shost(dev);
3539 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3540 unsigned long lock_flags;
3541 int result = count, i;
3542
3543 if (!capable(CAP_SYS_ADMIN))
3544 return -EACCES;
3545
3546 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3547 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3548 !strncmp(buf, "online", 6)) {
3549 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3550 spin_lock(&ioa_cfg->hrrq[i]._lock);
3551 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3552 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3553 }
3554 wmb();
3555 ioa_cfg->reset_retries = 0;
3556 ioa_cfg->in_ioa_bringdown = 0;
3557 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3558 }
3559 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3560 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3561
3562 return result;
3563 }
3564
3565 static struct device_attribute ipr_ioa_state_attr = {
3566 .attr = {
3567 .name = "online_state",
3568 .mode = S_IRUGO | S_IWUSR,
3569 },
3570 .show = ipr_show_adapter_state,
3571 .store = ipr_store_adapter_state
3572 };
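/*
 * Illustrative usage (not part of the original source): a dead adapter can be
 * asked to come back online by writing the literal string "online", e.g.
 * assuming host 0:
 *
 *   # cat /sys/class/scsi_host/host0/online_state
 *   offline
 *   # echo online > /sys/class/scsi_host/host0/online_state
 */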
3573
3574 /**
3575 * ipr_store_reset_adapter - Reset the adapter
3576 * @dev: device struct
3577 * @buf: buffer
3578 * @count: buffer size
3579 *
3580 * This function will reset the adapter.
3581 *
3582 * Return value:
3583 * count on success / other on failure
3584 **/
3585 static ssize_t ipr_store_reset_adapter(struct device *dev,
3586 struct device_attribute *attr,
3587 const char *buf, size_t count)
3588 {
3589 struct Scsi_Host *shost = class_to_shost(dev);
3590 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3591 unsigned long lock_flags;
3592 int result = count;
3593
3594 if (!capable(CAP_SYS_ADMIN))
3595 return -EACCES;
3596
3597 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3598 if (!ioa_cfg->in_reset_reload)
3599 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3600 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3601 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3602
3603 return result;
3604 }
3605
3606 static struct device_attribute ipr_ioa_reset_attr = {
3607 .attr = {
3608 .name = "reset_host",
3609 .mode = S_IWUSR,
3610 },
3611 .store = ipr_store_reset_adapter
3612 };
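/*
 * Illustrative usage (not part of the original source): writing anything to
 * reset_host as root triggers a normal shutdown/reset of the adapter, and the
 * write returns once the reset/reload has finished, e.g. assuming host 0:
 *
 *   # echo 1 > /sys/class/scsi_host/host0/reset_host
 */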
3613
3614 static int ipr_iopoll(struct blk_iopoll *iop, int budget);
3615 /**
3616 * ipr_show_iopoll_weight - Show ipr polling mode
3617 * @dev: class device struct
3618 * @buf: buffer
3619 *
3620 * Return value:
3621 * number of bytes printed to buffer
3622 **/
3623 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3624 struct device_attribute *attr, char *buf)
3625 {
3626 struct Scsi_Host *shost = class_to_shost(dev);
3627 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3628 unsigned long lock_flags = 0;
3629 int len;
3630
3631 spin_lock_irqsave(shost->host_lock, lock_flags);
3632 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3633 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3634
3635 return len;
3636 }
3637
3638 /**
3639 * ipr_store_iopoll_weight - Change the adapter's polling mode
3640 * @dev: class device struct
3641 * @buf: buffer
3642 *
3643 * Return value:
3644 * number of bytes printed to buffer
3645 **/
3646 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3647 struct device_attribute *attr,
3648 const char *buf, size_t count)
3649 {
3650 struct Scsi_Host *shost = class_to_shost(dev);
3651 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3652 unsigned long user_iopoll_weight;
3653 unsigned long lock_flags = 0;
3654 int i;
3655
3656 if (!ioa_cfg->sis64) {
3657 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
3658 return -EINVAL;
3659 }
3660 if (kstrtoul(buf, 10, &user_iopoll_weight))
3661 return -EINVAL;
3662
3663 if (user_iopoll_weight > 256) {
3664 dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must not exceed 256\n");
3665 return -EINVAL;
3666 }
3667
3668 if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3669 dev_info(&ioa_cfg->pdev->dev, "Current blk-iopoll weight is already set to this value\n");
3670 return strlen(buf);
3671 }
3672
3673 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
3674 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3675 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3676 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
3677 }
3678
3679 spin_lock_irqsave(shost->host_lock, lock_flags);
3680 ioa_cfg->iopoll_weight = user_iopoll_weight;
3681 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
3682 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3683 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3684 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
3685 ioa_cfg->iopoll_weight, ipr_iopoll);
3686 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
3687 }
3688 }
3689 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3690
3691 return strlen(buf);
3692 }
3693
3694 static struct device_attribute ipr_iopoll_weight_attr = {
3695 .attr = {
3696 .name = "iopoll_weight",
3697 .mode = S_IRUGO | S_IWUSR,
3698 },
3699 .show = ipr_show_iopoll_weight,
3700 .store = ipr_store_iopoll_weight
3701 };
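/*
 * Illustrative usage (not part of the original source): iopoll_weight only
 * has an effect on SIS-64 adapters with more than one interrupt vector. A
 * value of 0 disables blk-iopoll; values up to 256 set the per-poll budget,
 * e.g. assuming host 0:
 *
 *   # echo 64 > /sys/class/scsi_host/host0/iopoll_weight
 */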
3702
3703 /**
3704 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3705 * @buf_len: buffer length
3706 *
3707 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3708 * list to use for microcode download
3709 *
3710 * Return value:
3711 * pointer to sglist / NULL on failure
3712 **/
3713 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3714 {
3715 int sg_size, order, bsize_elem, num_elem, i, j;
3716 struct ipr_sglist *sglist;
3717 struct scatterlist *scatterlist;
3718 struct page *page;
3719
3720 /* Get the minimum size per scatter/gather element */
3721 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3722
3723 /* Get the actual size per element */
3724 order = get_order(sg_size);
3725
3726 /* Determine the actual number of bytes per element */
3727 bsize_elem = PAGE_SIZE * (1 << order);
3728
3729 /* Determine the actual number of sg entries needed */
3730 if (buf_len % bsize_elem)
3731 num_elem = (buf_len / bsize_elem) + 1;
3732 else
3733 num_elem = buf_len / bsize_elem;
3734
3735 /* Allocate a scatter/gather list for the DMA */
3736 sglist = kzalloc(sizeof(struct ipr_sglist) +
3737 (sizeof(struct scatterlist) * (num_elem - 1)),
3738 GFP_KERNEL);
3739
3740 if (sglist == NULL) {
3741 ipr_trace;
3742 return NULL;
3743 }
3744
3745 scatterlist = sglist->scatterlist;
3746 sg_init_table(scatterlist, num_elem);
3747
3748 sglist->order = order;
3749 sglist->num_sg = num_elem;
3750
3751 /* Allocate a bunch of sg elements */
3752 for (i = 0; i < num_elem; i++) {
3753 page = alloc_pages(GFP_KERNEL, order);
3754 if (!page) {
3755 ipr_trace;
3756
3757 /* Free up what we already allocated */
3758 for (j = i - 1; j >= 0; j--)
3759 __free_pages(sg_page(&scatterlist[j]), order);
3760 kfree(sglist);
3761 return NULL;
3762 }
3763
3764 sg_set_page(&scatterlist[i], page, 0, 0);
3765 }
3766
3767 return sglist;
3768 }
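/*
 * Worked example of the sizing above (illustrative only, assuming a 4 KiB
 * PAGE_SIZE and IPR_MAX_SGLIST == 64): for a 1 MiB microcode image,
 * sg_size = 1048576 / 63 ~= 16644 bytes, get_order(16644) == 3, so each
 * element is 4096 << 3 == 32 KiB and num_elem = 1048576 / 32768 == 32
 * scatter/gather entries, comfortably below the IPR_MAX_SGLIST limit.
 */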
3769
3770 /**
3771 * ipr_free_ucode_buffer - Frees a microcode download buffer
3772 * @sglist: scatter/gather list pointer
3773 *
3774 * Free a DMA'able ucode download buffer previously allocated with
3775 * ipr_alloc_ucode_buffer
3776 *
3777 * Return value:
3778 * nothing
3779 **/
3780 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3781 {
3782 int i;
3783
3784 for (i = 0; i < sglist->num_sg; i++)
3785 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3786
3787 kfree(sglist);
3788 }
3789
3790 /**
3791 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3792 * @sglist: scatter/gather list pointer
3793 * @buffer: buffer pointer
3794 * @len: buffer length
3795 *
3796 * Copy a microcode image from a user buffer into a buffer allocated by
3797 * ipr_alloc_ucode_buffer
3798 *
3799 * Return value:
3800 * 0 on success / other on failure
3801 **/
3802 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3803 u8 *buffer, u32 len)
3804 {
3805 int bsize_elem, i, result = 0;
3806 struct scatterlist *scatterlist;
3807 void *kaddr;
3808
3809 /* Determine the actual number of bytes per element */
3810 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3811
3812 scatterlist = sglist->scatterlist;
3813
3814 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3815 struct page *page = sg_page(&scatterlist[i]);
3816
3817 kaddr = kmap(page);
3818 memcpy(kaddr, buffer, bsize_elem);
3819 kunmap(page);
3820
3821 scatterlist[i].length = bsize_elem;
3822
3823 if (result != 0) {
3824 ipr_trace;
3825 return result;
3826 }
3827 }
3828
3829 if (len % bsize_elem) {
3830 struct page *page = sg_page(&scatterlist[i]);
3831
3832 kaddr = kmap(page);
3833 memcpy(kaddr, buffer, len % bsize_elem);
3834 kunmap(page);
3835
3836 scatterlist[i].length = len % bsize_elem;
3837 }
3838
3839 sglist->buffer_len = len;
3840 return result;
3841 }
3842
3843 /**
3844 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3845 * @ipr_cmd: ipr command struct
3846 * @sglist: scatter/gather list
3847 *
3848 * Builds a microcode download IOA data list (IOADL).
3849 *
3850 **/
3851 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3852 struct ipr_sglist *sglist)
3853 {
3854 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3855 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3856 struct scatterlist *scatterlist = sglist->scatterlist;
3857 int i;
3858
3859 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3860 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3861 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3862
3863 ioarcb->ioadl_len =
3864 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3865 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3866 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3867 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3868 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3869 }
3870
3871 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3872 }
3873
3874 /**
3875 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3876 * @ipr_cmd: ipr command struct
3877 * @sglist: scatter/gather list
3878 *
3879 * Builds a microcode download IOA data list (IOADL).
3880 *
3881 **/
3882 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3883 struct ipr_sglist *sglist)
3884 {
3885 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3886 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3887 struct scatterlist *scatterlist = sglist->scatterlist;
3888 int i;
3889
3890 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3891 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3892 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3893
3894 ioarcb->ioadl_len =
3895 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3896
3897 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3898 ioadl[i].flags_and_data_len =
3899 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3900 ioadl[i].address =
3901 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3902 }
3903
3904 ioadl[i-1].flags_and_data_len |=
3905 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3906 }
3907
3908 /**
3909 * ipr_update_ioa_ucode - Update IOA's microcode
3910 * @ioa_cfg: ioa config struct
3911 * @sglist: scatter/gather list
3912 *
3913 * Initiate an adapter reset to update the IOA's microcode
3914 *
3915 * Return value:
3916 * 0 on success / -EIO on failure
3917 **/
3918 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3919 struct ipr_sglist *sglist)
3920 {
3921 unsigned long lock_flags;
3922
3923 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3924 while (ioa_cfg->in_reset_reload) {
3925 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3926 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3927 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3928 }
3929
3930 if (ioa_cfg->ucode_sglist) {
3931 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3932 dev_err(&ioa_cfg->pdev->dev,
3933 "Microcode download already in progress\n");
3934 return -EIO;
3935 }
3936
3937 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3938 sglist->num_sg, DMA_TO_DEVICE);
3939
3940 if (!sglist->num_dma_sg) {
3941 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3942 dev_err(&ioa_cfg->pdev->dev,
3943 "Failed to map microcode download buffer!\n");
3944 return -EIO;
3945 }
3946
3947 ioa_cfg->ucode_sglist = sglist;
3948 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3949 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3950 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3951
3952 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3953 ioa_cfg->ucode_sglist = NULL;
3954 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3955 return 0;
3956 }
3957
3958 /**
3959 * ipr_store_update_fw - Update the firmware on the adapter
3960 * @dev: device struct
3961 * @buf: buffer
3962 * @count: buffer size
3963 *
3964 * This function will update the firmware on the adapter.
3965 *
3966 * Return value:
3967 * count on success / other on failure
3968 **/
3969 static ssize_t ipr_store_update_fw(struct device *dev,
3970 struct device_attribute *attr,
3971 const char *buf, size_t count)
3972 {
3973 struct Scsi_Host *shost = class_to_shost(dev);
3974 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3975 struct ipr_ucode_image_header *image_hdr;
3976 const struct firmware *fw_entry;
3977 struct ipr_sglist *sglist;
3978 char fname[100];
3979 char *src;
3980 int len, result, dnld_size;
3981
3982 if (!capable(CAP_SYS_ADMIN))
3983 return -EACCES;
3984
3985 len = snprintf(fname, 99, "%s", buf);
3986 fname[len-1] = '\0';
3987
3988 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3989 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3990 return -EIO;
3991 }
3992
3993 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3994
3995 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3996 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3997 sglist = ipr_alloc_ucode_buffer(dnld_size);
3998
3999 if (!sglist) {
4000 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4001 release_firmware(fw_entry);
4002 return -ENOMEM;
4003 }
4004
4005 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4006
4007 if (result) {
4008 dev_err(&ioa_cfg->pdev->dev,
4009 "Microcode buffer copy to DMA buffer failed\n");
4010 goto out;
4011 }
4012
4013 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
4014
4015 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4016
4017 if (!result)
4018 result = count;
4019 out:
4020 ipr_free_ucode_buffer(sglist);
4021 release_firmware(fw_entry);
4022 return result;
4023 }
4024
4025 static struct device_attribute ipr_update_fw_attr = {
4026 .attr = {
4027 .name = "update_fw",
4028 .mode = S_IWUSR,
4029 },
4030 .store = ipr_store_update_fw
4031 };
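/*
 * Illustrative usage (not part of the original source): update_fw takes the
 * name of a microcode image that request_firmware() can find (typically a
 * file under /lib/firmware), e.g. assuming host 0 and a hypothetical image
 * name:
 *
 *   # cp ibm-ipr-ucode.bin /lib/firmware/
 *   # echo ibm-ipr-ucode.bin > /sys/class/scsi_host/host0/update_fw
 *
 * The write blocks until the download and the subsequent adapter reset
 * complete.
 */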
4032
4033 /**
4034 * ipr_show_fw_type - Show the adapter's firmware type.
4035 * @dev: class device struct
4036 * @buf: buffer
4037 *
4038 * Return value:
4039 * number of bytes printed to buffer
4040 **/
4041 static ssize_t ipr_show_fw_type(struct device *dev,
4042 struct device_attribute *attr, char *buf)
4043 {
4044 struct Scsi_Host *shost = class_to_shost(dev);
4045 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4046 unsigned long lock_flags = 0;
4047 int len;
4048
4049 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4050 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4051 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4052 return len;
4053 }
4054
4055 static struct device_attribute ipr_ioa_fw_type_attr = {
4056 .attr = {
4057 .name = "fw_type",
4058 .mode = S_IRUGO,
4059 },
4060 .show = ipr_show_fw_type
4061 };
4062
4063 static struct device_attribute *ipr_ioa_attrs[] = {
4064 &ipr_fw_version_attr,
4065 &ipr_log_level_attr,
4066 &ipr_diagnostics_attr,
4067 &ipr_ioa_state_attr,
4068 &ipr_ioa_reset_attr,
4069 &ipr_update_fw_attr,
4070 &ipr_ioa_fw_type_attr,
4071 &ipr_iopoll_weight_attr,
4072 NULL,
4073 };
4074
4075 #ifdef CONFIG_SCSI_IPR_DUMP
4076 /**
4077 * ipr_read_dump - Dump the adapter
4078 * @filp: open sysfs file
4079 * @kobj: kobject struct
4080 * @bin_attr: bin_attribute struct
4081 * @buf: buffer
4082 * @off: offset
4083 * @count: buffer size
4084 *
4085 * Return value:
4086 * number of bytes printed to buffer
4087 **/
4088 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4089 struct bin_attribute *bin_attr,
4090 char *buf, loff_t off, size_t count)
4091 {
4092 struct device *cdev = container_of(kobj, struct device, kobj);
4093 struct Scsi_Host *shost = class_to_shost(cdev);
4094 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4095 struct ipr_dump *dump;
4096 unsigned long lock_flags = 0;
4097 char *src;
4098 int len, sdt_end;
4099 size_t rc = count;
4100
4101 if (!capable(CAP_SYS_ADMIN))
4102 return -EACCES;
4103
4104 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4105 dump = ioa_cfg->dump;
4106
4107 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4108 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4109 return 0;
4110 }
4111 kref_get(&dump->kref);
4112 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4113
4114 if (off > dump->driver_dump.hdr.len) {
4115 kref_put(&dump->kref, ipr_release_dump);
4116 return 0;
4117 }
4118
4119 if (off + count > dump->driver_dump.hdr.len) {
4120 count = dump->driver_dump.hdr.len - off;
4121 rc = count;
4122 }
4123
4124 if (count && off < sizeof(dump->driver_dump)) {
4125 if (off + count > sizeof(dump->driver_dump))
4126 len = sizeof(dump->driver_dump) - off;
4127 else
4128 len = count;
4129 src = (u8 *)&dump->driver_dump + off;
4130 memcpy(buf, src, len);
4131 buf += len;
4132 off += len;
4133 count -= len;
4134 }
4135
4136 off -= sizeof(dump->driver_dump);
4137
4138 if (ioa_cfg->sis64)
4139 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4140 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4141 sizeof(struct ipr_sdt_entry));
4142 else
4143 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4144 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4145
4146 if (count && off < sdt_end) {
4147 if (off + count > sdt_end)
4148 len = sdt_end - off;
4149 else
4150 len = count;
4151 src = (u8 *)&dump->ioa_dump + off;
4152 memcpy(buf, src, len);
4153 buf += len;
4154 off += len;
4155 count -= len;
4156 }
4157
4158 off -= sdt_end;
4159
4160 while (count) {
4161 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4162 len = PAGE_ALIGN(off) - off;
4163 else
4164 len = count;
4165 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4166 src += off & ~PAGE_MASK;
4167 memcpy(buf, src, len);
4168 buf += len;
4169 off += len;
4170 count -= len;
4171 }
4172
4173 kref_put(&dump->kref, ipr_release_dump);
4174 return rc;
4175 }
4176
4177 /**
4178 * ipr_alloc_dump - Prepare for adapter dump
4179 * @ioa_cfg: ioa config struct
4180 *
4181 * Return value:
4182 * 0 on success / other on failure
4183 **/
4184 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4185 {
4186 struct ipr_dump *dump;
4187 __be32 **ioa_data;
4188 unsigned long lock_flags = 0;
4189
4190 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4191
4192 if (!dump) {
4193 ipr_err("Dump memory allocation failed\n");
4194 return -ENOMEM;
4195 }
4196
4197 if (ioa_cfg->sis64)
4198 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4199 else
4200 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4201
4202 if (!ioa_data) {
4203 ipr_err("Dump memory allocation failed\n");
4204 kfree(dump);
4205 return -ENOMEM;
4206 }
4207
4208 dump->ioa_dump.ioa_data = ioa_data;
4209
4210 kref_init(&dump->kref);
4211 dump->ioa_cfg = ioa_cfg;
4212
4213 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4214
4215 if (INACTIVE != ioa_cfg->sdt_state) {
4216 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4217 vfree(dump->ioa_dump.ioa_data);
4218 kfree(dump);
4219 return 0;
4220 }
4221
4222 ioa_cfg->dump = dump;
4223 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4224 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4225 ioa_cfg->dump_taken = 1;
4226 schedule_work(&ioa_cfg->work_q);
4227 }
4228 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4229
4230 return 0;
4231 }
4232
4233 /**
4234 * ipr_free_dump - Free adapter dump memory
4235 * @ioa_cfg: ioa config struct
4236 *
4237 * Return value:
4238 * 0 on success / other on failure
4239 **/
4240 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4241 {
4242 struct ipr_dump *dump;
4243 unsigned long lock_flags = 0;
4244
4245 ENTER;
4246
4247 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4248 dump = ioa_cfg->dump;
4249 if (!dump) {
4250 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4251 return 0;
4252 }
4253
4254 ioa_cfg->dump = NULL;
4255 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4256
4257 kref_put(&dump->kref, ipr_release_dump);
4258
4259 LEAVE;
4260 return 0;
4261 }
4262
4263 /**
4264 * ipr_write_dump - Setup dump state of adapter
4265 * @filp: open sysfs file
4266 * @kobj: kobject struct
4267 * @bin_attr: bin_attribute struct
4268 * @buf: buffer
4269 * @off: offset
4270 * @count: buffer size
4271 *
4272 * Return value:
4273 * count on success / other on failure
4274 **/
4275 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4276 struct bin_attribute *bin_attr,
4277 char *buf, loff_t off, size_t count)
4278 {
4279 struct device *cdev = container_of(kobj, struct device, kobj);
4280 struct Scsi_Host *shost = class_to_shost(cdev);
4281 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4282 int rc;
4283
4284 if (!capable(CAP_SYS_ADMIN))
4285 return -EACCES;
4286
4287 if (buf[0] == '1')
4288 rc = ipr_alloc_dump(ioa_cfg);
4289 else if (buf[0] == '0')
4290 rc = ipr_free_dump(ioa_cfg);
4291 else
4292 return -EINVAL;
4293
4294 if (rc)
4295 return rc;
4296 else
4297 return count;
4298 }
4299
4300 static struct bin_attribute ipr_dump_attr = {
4301 .attr = {
4302 .name = "dump",
4303 .mode = S_IRUSR | S_IWUSR,
4304 },
4305 .size = 0,
4306 .read = ipr_read_dump,
4307 .write = ipr_write_dump
4308 };
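/*
 * Illustrative usage (not part of the original source): the "dump" bin
 * attribute is both the trigger and the transport. Writing '1' allocates
 * dump memory and arms dump collection, the dump is read back from the same
 * file once one has actually been obtained (e.g. after an adapter error),
 * and writing '0' frees the dump memory, e.g. assuming host 0:
 *
 *   # echo 1 > /sys/class/scsi_host/host0/dump
 *   # cat /sys/class/scsi_host/host0/dump > /tmp/ipr_dump.bin
 *   # echo 0 > /sys/class/scsi_host/host0/dump
 */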
4309 #else
4310 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4311 #endif
4312
4313 /**
4314 * ipr_change_queue_depth - Change the device's queue depth
4315 * @sdev: scsi device struct
4316 * @qdepth: depth to set
4317 * @reason: calling context
4318 *
4319 * Return value:
4320 * actual depth set
4321 **/
4322 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
4323 int reason)
4324 {
4325 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4326 struct ipr_resource_entry *res;
4327 unsigned long lock_flags = 0;
4328
4329 if (reason != SCSI_QDEPTH_DEFAULT)
4330 return -EOPNOTSUPP;
4331
4332 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4333 res = (struct ipr_resource_entry *)sdev->hostdata;
4334
4335 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4336 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4337 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4338
4339 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4340 return sdev->queue_depth;
4341 }
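/*
 * Illustrative usage (not part of the original source): this callback runs
 * when the generic SCSI queue_depth attribute is written, e.g. assuming the
 * device is sda:
 *
 *   # echo 16 > /sys/block/sda/device/queue_depth
 *
 * For SATA (GATA) resources the requested value is capped at
 * IPR_MAX_CMD_PER_ATA_LUN.
 */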
4342
4343 /**
4344 * ipr_change_queue_type - Change the device's queue type
4345 * @sdev: scsi device struct
4346 * @tag_type: type of tags to use
4347 *
4348 * Return value:
4349 * actual queue type set
4350 **/
4351 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4352 {
4353 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4354 struct ipr_resource_entry *res;
4355 unsigned long lock_flags = 0;
4356
4357 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4358 res = (struct ipr_resource_entry *)sdev->hostdata;
4359
4360 if (res) {
4361 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
4362 /*
4363 * We don't bother quiescing the device here since the
4364 * adapter firmware does it for us.
4365 */
4366 scsi_set_tag_type(sdev, tag_type);
4367
4368 if (tag_type)
4369 scsi_activate_tcq(sdev, sdev->queue_depth);
4370 else
4371 scsi_deactivate_tcq(sdev, sdev->queue_depth);
4372 } else
4373 tag_type = 0;
4374 } else
4375 tag_type = 0;
4376
4377 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4378 return tag_type;
4379 }
4380
4381 /**
4382 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4383 * @dev: device struct
4384 * @attr: device attribute structure
4385 * @buf: buffer
4386 *
4387 * Return value:
4388 * number of bytes printed to buffer
4389 **/
4390 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4391 {
4392 struct scsi_device *sdev = to_scsi_device(dev);
4393 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4394 struct ipr_resource_entry *res;
4395 unsigned long lock_flags = 0;
4396 ssize_t len = -ENXIO;
4397
4398 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4399 res = (struct ipr_resource_entry *)sdev->hostdata;
4400 if (res)
4401 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4402 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4403 return len;
4404 }
4405
4406 static struct device_attribute ipr_adapter_handle_attr = {
4407 .attr = {
4408 .name = "adapter_handle",
4409 .mode = S_IRUSR,
4410 },
4411 .show = ipr_show_adapter_handle
4412 };
4413
4414 /**
4415 * ipr_show_resource_path - Show the resource path or the resource address for
4416 * this device.
4417 * @dev: device struct
4418 * @attr: device attribute structure
4419 * @buf: buffer
4420 *
4421 * Return value:
4422 * number of bytes printed to buffer
4423 **/
4424 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4425 {
4426 struct scsi_device *sdev = to_scsi_device(dev);
4427 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4428 struct ipr_resource_entry *res;
4429 unsigned long lock_flags = 0;
4430 ssize_t len = -ENXIO;
4431 char buffer[IPR_MAX_RES_PATH_LENGTH];
4432
4433 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4434 res = (struct ipr_resource_entry *)sdev->hostdata;
4435 if (res && ioa_cfg->sis64)
4436 len = snprintf(buf, PAGE_SIZE, "%s\n",
4437 __ipr_format_res_path(res->res_path, buffer,
4438 sizeof(buffer)));
4439 else if (res)
4440 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4441 res->bus, res->target, res->lun);
4442
4443 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4444 return len;
4445 }
4446
4447 static struct device_attribute ipr_resource_path_attr = {
4448 .attr = {
4449 .name = "resource_path",
4450 .mode = S_IRUGO,
4451 },
4452 .show = ipr_show_resource_path
4453 };
4454
4455 /**
4456 * ipr_show_device_id - Show the device_id for this device.
4457 * @dev: device struct
4458 * @attr: device attribute structure
4459 * @buf: buffer
4460 *
4461 * Return value:
4462 * number of bytes printed to buffer
4463 **/
4464 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4465 {
4466 struct scsi_device *sdev = to_scsi_device(dev);
4467 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4468 struct ipr_resource_entry *res;
4469 unsigned long lock_flags = 0;
4470 ssize_t len = -ENXIO;
4471
4472 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4473 res = (struct ipr_resource_entry *)sdev->hostdata;
4474 if (res && ioa_cfg->sis64)
4475 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4476 else if (res)
4477 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4478
4479 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4480 return len;
4481 }
4482
4483 static struct device_attribute ipr_device_id_attr = {
4484 .attr = {
4485 .name = "device_id",
4486 .mode = S_IRUGO,
4487 },
4488 .show = ipr_show_device_id
4489 };
4490
4491 /**
4492 * ipr_show_resource_type - Show the resource type for this device.
4493 * @dev: device struct
4494 * @attr: device attribute structure
4495 * @buf: buffer
4496 *
4497 * Return value:
4498 * number of bytes printed to buffer
4499 **/
4500 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4501 {
4502 struct scsi_device *sdev = to_scsi_device(dev);
4503 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4504 struct ipr_resource_entry *res;
4505 unsigned long lock_flags = 0;
4506 ssize_t len = -ENXIO;
4507
4508 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4509 res = (struct ipr_resource_entry *)sdev->hostdata;
4510
4511 if (res)
4512 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4513
4514 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4515 return len;
4516 }
4517
4518 static struct device_attribute ipr_resource_type_attr = {
4519 .attr = {
4520 .name = "resource_type",
4521 .mode = S_IRUGO,
4522 },
4523 .show = ipr_show_resource_type
4524 };
4525
4526 static struct device_attribute *ipr_dev_attrs[] = {
4527 &ipr_adapter_handle_attr,
4528 &ipr_resource_path_attr,
4529 &ipr_device_id_attr,
4530 &ipr_resource_type_attr,
4531 NULL,
4532 };
4533
4534 /**
4535 * ipr_biosparam - Return the HSC mapping
4536 * @sdev: scsi device struct
4537 * @block_device: block device pointer
4538 * @capacity: capacity of the device
4539 * @parm: Array containing returned HSC values.
4540 *
4541 * This function generates the HSC parms that fdisk uses.
4542 * We want to make sure we return something that places partitions
4543 * on 4k boundaries for best performance with the IOA.
4544 *
4545 * Return value:
4546 * 0 on success
4547 **/
4548 static int ipr_biosparam(struct scsi_device *sdev,
4549 struct block_device *block_device,
4550 sector_t capacity, int *parm)
4551 {
4552 int heads, sectors;
4553 sector_t cylinders;
4554
4555 heads = 128;
4556 sectors = 32;
4557
4558 cylinders = capacity;
4559 sector_div(cylinders, (128 * 32));
4560
4561 /* return result */
4562 parm[0] = heads;
4563 parm[1] = sectors;
4564 parm[2] = cylinders;
4565
4566 return 0;
4567 }
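/*
 * Arithmetic behind the fixed geometry above (illustrative): 128 heads *
 * 32 sectors = 4096 sectors per cylinder, i.e. 2 MiB with 512-byte sectors,
 * so any partition that starts on a cylinder boundary is automatically
 * aligned to a 4 KiB boundary, which is what the IOA prefers.
 */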
4568
4569 /**
4570 * ipr_find_starget - Find target based on bus/target.
4571 * @starget: scsi target struct
4572 *
4573 * Return value:
4574 * resource entry pointer if found / NULL if not found
4575 **/
4576 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4577 {
4578 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4579 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4580 struct ipr_resource_entry *res;
4581
4582 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4583 if ((res->bus == starget->channel) &&
4584 (res->target == starget->id)) {
4585 return res;
4586 }
4587 }
4588
4589 return NULL;
4590 }
4591
4592 static struct ata_port_info sata_port_info;
4593
4594 /**
4595 * ipr_target_alloc - Prepare for commands to a SCSI target
4596 * @starget: scsi target struct
4597 *
4598 * If the device is a SATA device, this function allocates an
4599 * ATA port with libata, else it does nothing.
4600 *
4601 * Return value:
4602 * 0 on success / non-0 on failure
4603 **/
4604 static int ipr_target_alloc(struct scsi_target *starget)
4605 {
4606 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4607 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4608 struct ipr_sata_port *sata_port;
4609 struct ata_port *ap;
4610 struct ipr_resource_entry *res;
4611 unsigned long lock_flags;
4612
4613 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4614 res = ipr_find_starget(starget);
4615 starget->hostdata = NULL;
4616
4617 if (res && ipr_is_gata(res)) {
4618 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4619 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4620 if (!sata_port)
4621 return -ENOMEM;
4622
4623 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4624 if (ap) {
4625 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4626 sata_port->ioa_cfg = ioa_cfg;
4627 sata_port->ap = ap;
4628 sata_port->res = res;
4629
4630 res->sata_port = sata_port;
4631 ap->private_data = sata_port;
4632 starget->hostdata = sata_port;
4633 } else {
4634 kfree(sata_port);
4635 return -ENOMEM;
4636 }
4637 }
4638 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4639
4640 return 0;
4641 }
4642
4643 /**
4644 * ipr_target_destroy - Destroy a SCSI target
4645 * @starget: scsi target struct
4646 *
4647 * If the device was a SATA device, this function frees the libata
4648 * ATA port, else it does nothing.
4649 *
4650 **/
4651 static void ipr_target_destroy(struct scsi_target *starget)
4652 {
4653 struct ipr_sata_port *sata_port = starget->hostdata;
4654 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4655 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4656
4657 if (ioa_cfg->sis64) {
4658 if (!ipr_find_starget(starget)) {
4659 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4660 clear_bit(starget->id, ioa_cfg->array_ids);
4661 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4662 clear_bit(starget->id, ioa_cfg->vset_ids);
4663 else if (starget->channel == 0)
4664 clear_bit(starget->id, ioa_cfg->target_ids);
4665 }
4666 }
4667
4668 if (sata_port) {
4669 starget->hostdata = NULL;
4670 ata_sas_port_destroy(sata_port->ap);
4671 kfree(sata_port);
4672 }
4673 }
4674
4675 /**
4676 * ipr_find_sdev - Find device based on bus/target/lun.
4677 * @sdev: scsi device struct
4678 *
4679 * Return value:
4680 * resource entry pointer if found / NULL if not found
4681 **/
4682 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4683 {
4684 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4685 struct ipr_resource_entry *res;
4686
4687 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4688 if ((res->bus == sdev->channel) &&
4689 (res->target == sdev->id) &&
4690 (res->lun == sdev->lun))
4691 return res;
4692 }
4693
4694 return NULL;
4695 }
4696
4697 /**
4698 * ipr_slave_destroy - Unconfigure a SCSI device
4699 * @sdev: scsi device struct
4700 *
4701 * Return value:
4702 * nothing
4703 **/
4704 static void ipr_slave_destroy(struct scsi_device *sdev)
4705 {
4706 struct ipr_resource_entry *res;
4707 struct ipr_ioa_cfg *ioa_cfg;
4708 unsigned long lock_flags = 0;
4709
4710 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4711
4712 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4713 res = (struct ipr_resource_entry *) sdev->hostdata;
4714 if (res) {
4715 if (res->sata_port)
4716 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4717 sdev->hostdata = NULL;
4718 res->sdev = NULL;
4719 res->sata_port = NULL;
4720 }
4721 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4722 }
4723
4724 /**
4725 * ipr_slave_configure - Configure a SCSI device
4726 * @sdev: scsi device struct
4727 *
4728 * This function configures the specified scsi device.
4729 *
4730 * Return value:
4731 * 0 on success
4732 **/
4733 static int ipr_slave_configure(struct scsi_device *sdev)
4734 {
4735 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4736 struct ipr_resource_entry *res;
4737 struct ata_port *ap = NULL;
4738 unsigned long lock_flags = 0;
4739 char buffer[IPR_MAX_RES_PATH_LENGTH];
4740
4741 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4742 res = sdev->hostdata;
4743 if (res) {
4744 if (ipr_is_af_dasd_device(res))
4745 sdev->type = TYPE_RAID;
4746 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4747 sdev->scsi_level = 4;
4748 sdev->no_uld_attach = 1;
4749 }
4750 if (ipr_is_vset_device(res)) {
4751 blk_queue_rq_timeout(sdev->request_queue,
4752 IPR_VSET_RW_TIMEOUT);
4753 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4754 }
4755 if (ipr_is_gata(res) && res->sata_port)
4756 ap = res->sata_port->ap;
4757 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4758
4759 if (ap) {
4760 scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
4761 ata_sas_slave_configure(sdev, ap);
4762 } else
4763 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
4764 if (ioa_cfg->sis64)
4765 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4766 ipr_format_res_path(ioa_cfg,
4767 res->res_path, buffer, sizeof(buffer)));
4768 return 0;
4769 }
4770 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4771 return 0;
4772 }
4773
4774 /**
4775 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4776 * @sdev: scsi device struct
4777 *
4778 * This function initializes an ATA port so that future commands
4779 * sent through queuecommand will work.
4780 *
4781 * Return value:
4782 * 0 on success
4783 **/
4784 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4785 {
4786 struct ipr_sata_port *sata_port = NULL;
4787 int rc = -ENXIO;
4788
4789 ENTER;
4790 if (sdev->sdev_target)
4791 sata_port = sdev->sdev_target->hostdata;
4792 if (sata_port) {
4793 rc = ata_sas_port_init(sata_port->ap);
4794 if (rc == 0)
4795 rc = ata_sas_sync_probe(sata_port->ap);
4796 }
4797
4798 if (rc)
4799 ipr_slave_destroy(sdev);
4800
4801 LEAVE;
4802 return rc;
4803 }
4804
4805 /**
4806 * ipr_slave_alloc - Prepare for commands to a device.
4807 * @sdev: scsi device struct
4808 *
4809 * This function saves a pointer to the resource entry
4810 * in the scsi device struct if the device exists. We
4811 * can then use this pointer in ipr_queuecommand when
4812 * handling new commands.
4813 *
4814 * Return value:
4815 * 0 on success / -ENXIO if device does not exist
4816 **/
4817 static int ipr_slave_alloc(struct scsi_device *sdev)
4818 {
4819 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4820 struct ipr_resource_entry *res;
4821 unsigned long lock_flags;
4822 int rc = -ENXIO;
4823
4824 sdev->hostdata = NULL;
4825
4826 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4827
4828 res = ipr_find_sdev(sdev);
4829 if (res) {
4830 res->sdev = sdev;
4831 res->add_to_ml = 0;
4832 res->in_erp = 0;
4833 sdev->hostdata = res;
4834 if (!ipr_is_naca_model(res))
4835 res->needs_sync_complete = 1;
4836 rc = 0;
4837 if (ipr_is_gata(res)) {
4838 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4839 return ipr_ata_slave_alloc(sdev);
4840 }
4841 }
4842
4843 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4844
4845 return rc;
4846 }
4847
4848 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4849 {
4850 struct ipr_ioa_cfg *ioa_cfg;
4851 unsigned long lock_flags = 0;
4852 int rc = SUCCESS;
4853
4854 ENTER;
4855 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4856 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4857
4858 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4859 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4860 dev_err(&ioa_cfg->pdev->dev,
4861 "Adapter being reset as a result of error recovery.\n");
4862
4863 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4864 ioa_cfg->sdt_state = GET_DUMP;
4865 }
4866
4867 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4868 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4869 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4870
4871 /* If we got hit with a host reset while we were already resetting
4872 the adapter for some reason, and that reset failed, fail this host reset too. */
4873 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4874 ipr_trace;
4875 rc = FAILED;
4876 }
4877
4878 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4879 LEAVE;
4880 return rc;
4881 }
4882
4883 /**
4884 * ipr_device_reset - Reset the device
4885 * @ioa_cfg: ioa config struct
4886 * @res: resource entry struct
4887 *
4888 * This function issues a device reset to the affected device.
4889 * If the device is a SCSI device, a LUN reset will be sent
4890 * to the device first. If that does not work, a target reset
4891 * will be sent. If the device is a SATA device, a PHY reset will
4892 * be sent.
4893 *
4894 * Return value:
4895 * 0 on success / non-zero on failure
4896 **/
4897 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4898 struct ipr_resource_entry *res)
4899 {
4900 struct ipr_cmnd *ipr_cmd;
4901 struct ipr_ioarcb *ioarcb;
4902 struct ipr_cmd_pkt *cmd_pkt;
4903 struct ipr_ioarcb_ata_regs *regs;
4904 u32 ioasc;
4905
4906 ENTER;
4907 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4908 ioarcb = &ipr_cmd->ioarcb;
4909 cmd_pkt = &ioarcb->cmd_pkt;
4910
4911 if (ipr_cmd->ioa_cfg->sis64) {
4912 regs = &ipr_cmd->i.ata_ioadl.regs;
4913 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4914 } else
4915 regs = &ioarcb->u.add_data.u.regs;
4916
4917 ioarcb->res_handle = res->res_handle;
4918 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4919 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4920 if (ipr_is_gata(res)) {
4921 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
4922 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
4923 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4924 }
4925
4926 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4927 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4928 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
4929 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4930 if (ipr_cmd->ioa_cfg->sis64)
4931 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4932 sizeof(struct ipr_ioasa_gata));
4933 else
4934 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4935 sizeof(struct ipr_ioasa_gata));
4936 }
4937
4938 LEAVE;
4939 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
4940 }
4941
4942 /**
4943 * ipr_sata_reset - Reset the SATA port
4944 * @link: SATA link to reset
4945 * @classes: class of the attached device
4946 *
4947 * This function issues a SATA phy reset to the affected ATA link.
4948 *
4949 * Return value:
4950 * 0 on success / non-zero on failure
4951 **/
4952 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
4953 unsigned long deadline)
4954 {
4955 struct ipr_sata_port *sata_port = link->ap->private_data;
4956 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4957 struct ipr_resource_entry *res;
4958 unsigned long lock_flags = 0;
4959 int rc = -ENXIO;
4960
4961 ENTER;
4962 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4963 while (ioa_cfg->in_reset_reload) {
4964 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4965 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4966 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4967 }
4968
4969 res = sata_port->res;
4970 if (res) {
4971 rc = ipr_device_reset(ioa_cfg, res);
4972 *classes = res->ata_class;
4973 }
4974
4975 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4976 LEAVE;
4977 return rc;
4978 }
4979
4980 /**
4981 * ipr_eh_dev_reset - Reset the device
4982 * @scsi_cmd: scsi command struct
4983 *
4984 * This function issues a device reset to the affected device.
4985 * A LUN reset will be sent to the device first. If that does
4986 * not work, a target reset will be sent.
4987 *
4988 * Return value:
4989 * SUCCESS / FAILED
4990 **/
4991 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
4992 {
4993 struct ipr_cmnd *ipr_cmd;
4994 struct ipr_ioa_cfg *ioa_cfg;
4995 struct ipr_resource_entry *res;
4996 struct ata_port *ap;
4997 int rc = 0;
4998 struct ipr_hrr_queue *hrrq;
4999
5000 ENTER;
5001 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5002 res = scsi_cmd->device->hostdata;
5003
5004 if (!res)
5005 return FAILED;
5006
5007 /*
5008 * If we are currently going through reset/reload, return failed. This will force the
5009 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5010 * reset to complete
5011 */
5012 if (ioa_cfg->in_reset_reload)
5013 return FAILED;
5014 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5015 return FAILED;
5016
5017 for_each_hrrq(hrrq, ioa_cfg) {
5018 spin_lock(&hrrq->_lock);
5019 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5020 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5021 if (ipr_cmd->scsi_cmd)
5022 ipr_cmd->done = ipr_scsi_eh_done;
5023 if (ipr_cmd->qc)
5024 ipr_cmd->done = ipr_sata_eh_done;
5025 if (ipr_cmd->qc &&
5026 !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5027 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5028 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5029 }
5030 }
5031 }
5032 spin_unlock(&hrrq->_lock);
5033 }
5034 res->resetting_device = 1;
5035 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5036
5037 if (ipr_is_gata(res) && res->sata_port) {
5038 ap = res->sata_port->ap;
5039 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5040 ata_std_error_handler(ap);
5041 spin_lock_irq(scsi_cmd->device->host->host_lock);
5042
5043 for_each_hrrq(hrrq, ioa_cfg) {
5044 spin_lock(&hrrq->_lock);
5045 list_for_each_entry(ipr_cmd,
5046 &hrrq->hrrq_pending_q, queue) {
5047 if (ipr_cmd->ioarcb.res_handle ==
5048 res->res_handle) {
5049 rc = -EIO;
5050 break;
5051 }
5052 }
5053 spin_unlock(&hrrq->_lock);
5054 }
5055 } else
5056 rc = ipr_device_reset(ioa_cfg, res);
5057 res->resetting_device = 0;
5058 res->reset_occurred = 1;
5059
5060 LEAVE;
5061 return rc ? FAILED : SUCCESS;
5062 }
5063
5064 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5065 {
5066 int rc;
5067
5068 spin_lock_irq(cmd->device->host->host_lock);
5069 rc = __ipr_eh_dev_reset(cmd);
5070 spin_unlock_irq(cmd->device->host->host_lock);
5071
5072 return rc;
5073 }
5074
5075 /**
5076 * ipr_bus_reset_done - Op done function for bus reset.
5077 * @ipr_cmd: ipr command struct
5078 *
5079 * This function is the op done function for a bus reset
5080 *
5081 * Return value:
5082 * none
5083 **/
5084 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5085 {
5086 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5087 struct ipr_resource_entry *res;
5088
5089 ENTER;
5090 if (!ioa_cfg->sis64)
5091 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5092 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5093 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5094 break;
5095 }
5096 }
5097
5098 /*
5099 * If abort has not completed, indicate the reset has, else call the
5100 * abort's done function to wake the sleeping eh thread
5101 */
5102 if (ipr_cmd->sibling->sibling)
5103 ipr_cmd->sibling->sibling = NULL;
5104 else
5105 ipr_cmd->sibling->done(ipr_cmd->sibling);
5106
5107 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5108 LEAVE;
5109 }
5110
5111 /**
5112 * ipr_abort_timeout - An abort task has timed out
5113 * @ipr_cmd: ipr command struct
5114 *
5115 * This function handles when an abort task times out. If this
5116 * happens we issue a bus reset since we have resources tied
5117 * up that must be freed before returning to the midlayer.
5118 *
5119 * Return value:
5120 * none
5121 **/
5122 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5123 {
5124 struct ipr_cmnd *reset_cmd;
5125 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5126 struct ipr_cmd_pkt *cmd_pkt;
5127 unsigned long lock_flags = 0;
5128
5129 ENTER;
5130 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5131 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5132 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5133 return;
5134 }
5135
5136 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5137 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5138 ipr_cmd->sibling = reset_cmd;
5139 reset_cmd->sibling = ipr_cmd;
5140 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5141 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5142 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5143 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5144 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5145
5146 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5147 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5148 LEAVE;
5149 }
5150
5151 /**
5152 * ipr_cancel_op - Cancel specified op
5153 * @scsi_cmd: scsi command struct
5154 *
5155 * This function cancels specified op.
5156 *
5157 * Return value:
5158 * SUCCESS / FAILED
5159 **/
5160 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5161 {
5162 struct ipr_cmnd *ipr_cmd;
5163 struct ipr_ioa_cfg *ioa_cfg;
5164 struct ipr_resource_entry *res;
5165 struct ipr_cmd_pkt *cmd_pkt;
5166 u32 ioasc, int_reg;
5167 int op_found = 0;
5168 struct ipr_hrr_queue *hrrq;
5169
5170 ENTER;
5171 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5172 res = scsi_cmd->device->hostdata;
5173
5174 /* If we are currently going through reset/reload, return failed.
5175 * This will force the mid-layer to call ipr_eh_host_reset,
5176 * which will then go to sleep and wait for the reset to complete
5177 */
5178 if (ioa_cfg->in_reset_reload ||
5179 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5180 return FAILED;
5181 if (!res)
5182 return FAILED;
5183
5184 /*
5185 * If we are aborting a timed out op, chances are that the timeout was caused
5186 * by a still not detected EEH error. In such cases, reading a register will
5187 * trigger the EEH recovery infrastructure.
5188 */
5189 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5190
5191 if (!ipr_is_gscsi(res))
5192 return FAILED;
5193
5194 for_each_hrrq(hrrq, ioa_cfg) {
5195 spin_lock(&hrrq->_lock);
5196 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5197 if (ipr_cmd->scsi_cmd == scsi_cmd) {
5198 ipr_cmd->done = ipr_scsi_eh_done;
5199 op_found = 1;
5200 break;
5201 }
5202 }
5203 spin_unlock(&hrrq->_lock);
5204 }
5205
5206 if (!op_found)
5207 return SUCCESS;
5208
5209 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5210 ipr_cmd->ioarcb.res_handle = res->res_handle;
5211 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5212 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5213 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5214 ipr_cmd->u.sdev = scsi_cmd->device;
5215
5216 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5217 scsi_cmd->cmnd[0]);
5218 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5219 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5220
5221 /*
5222 * If the abort task timed out and we sent a bus reset, we will get
5223 * one of the following responses to the abort
5224 */
5225 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5226 ioasc = 0;
5227 ipr_trace;
5228 }
5229
5230 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5231 if (!ipr_is_naca_model(res))
5232 res->needs_sync_complete = 1;
5233
5234 LEAVE;
5235 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5236 }
5237
5238 /**
5239 * ipr_eh_abort - Abort a single op
5240 * @scsi_cmd: scsi command struct
5241 *
5242 * Return value:
5243 * SUCCESS / FAILED
5244 **/
5245 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5246 {
5247 unsigned long flags;
5248 int rc;
5249
5250 ENTER;
5251
5252 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5253 rc = ipr_cancel_op(scsi_cmd);
5254 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5255
5256 LEAVE;
5257 return rc;
5258 }
5259
5260 /**
5261 * ipr_handle_other_interrupt - Handle "other" interrupts
5262 * @ioa_cfg: ioa config struct
5263 * @int_reg: interrupt register
5264 *
5265 * Return value:
5266 * IRQ_NONE / IRQ_HANDLED
5267 **/
5268 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5269 u32 int_reg)
5270 {
5271 irqreturn_t rc = IRQ_HANDLED;
5272 u32 int_mask_reg;
5273
5274 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5275 int_reg &= ~int_mask_reg;
5276
5277 /* If an interrupt on the adapter did not occur, ignore it.
5278 * Or in the case of SIS 64, check for a stage change interrupt.
5279 */
5280 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5281 if (ioa_cfg->sis64) {
5282 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5283 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5284 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5285
5286 /* clear stage change */
5287 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5288 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5289 list_del(&ioa_cfg->reset_cmd->queue);
5290 del_timer(&ioa_cfg->reset_cmd->timer);
5291 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5292 return IRQ_HANDLED;
5293 }
5294 }
5295
5296 return IRQ_NONE;
5297 }
5298
5299 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5300 /* Mask the interrupt */
5301 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5302
5303 /* Clear the interrupt */
5304 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
5305 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5306
5307 list_del(&ioa_cfg->reset_cmd->queue);
5308 del_timer(&ioa_cfg->reset_cmd->timer);
5309 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5310 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5311 if (ioa_cfg->clear_isr) {
5312 if (ipr_debug && printk_ratelimit())
5313 dev_err(&ioa_cfg->pdev->dev,
5314 "Spurious interrupt detected. 0x%08X\n", int_reg);
5315 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5316 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5317 return IRQ_NONE;
5318 }
5319 } else {
5320 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5321 ioa_cfg->ioa_unit_checked = 1;
5322 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5323 dev_err(&ioa_cfg->pdev->dev,
5324 "No Host RRQ. 0x%08X\n", int_reg);
5325 else
5326 dev_err(&ioa_cfg->pdev->dev,
5327 "Permanent IOA failure. 0x%08X\n", int_reg);
5328
5329 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5330 ioa_cfg->sdt_state = GET_DUMP;
5331
5332 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5333 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5334 }
5335
5336 return rc;
5337 }
5338
5339 /**
5340 * ipr_isr_eh - Interrupt service routine error handler
5341 * @ioa_cfg: ioa config struct
5342 	 * @msg:	message to log
 	 * @number:	number to log
5343 *
5344 * Return value:
5345 * none
5346 **/
5347 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5348 {
5349 ioa_cfg->errors_logged++;
5350 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5351
5352 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5353 ioa_cfg->sdt_state = GET_DUMP;
5354
5355 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5356 }
5357
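/**
 * ipr_process_hrrq - Process responses on a host request/response queue
 * @hrr_queue:	hrr queue to process
 * @budget:	maximum number of responses to process, or -1 for no limit
 * @doneq:	list head on which completed commands are collected
 *
 * Walks the HRR queue, moving each completed command onto @doneq until the
 * toggle bit no longer matches or the budget is exhausted. Callers complete
 * the commands on @doneq after dropping the queue lock.
 *
 * Return value:
 * 	number of responses processed
 **/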
5358 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5359 struct list_head *doneq)
5360 {
5361 u32 ioasc;
5362 u16 cmd_index;
5363 struct ipr_cmnd *ipr_cmd;
5364 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5365 int num_hrrq = 0;
5366
5367 /* If interrupts are disabled, ignore the interrupt */
5368 if (!hrr_queue->allow_interrupts)
5369 return 0;
5370
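	/*
	 * Each response entry carries a toggle bit. Entries whose toggle bit
	 * matches the value expected for the current pass through the queue
	 * are new; the expected value is flipped when the queue wraps, so
	 * stale entries from the previous pass are ignored.
	 */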
5371 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5372 hrr_queue->toggle_bit) {
5373
5374 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5375 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5376 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5377
5378 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5379 cmd_index < hrr_queue->min_cmd_id)) {
5380 ipr_isr_eh(ioa_cfg,
5381 "Invalid response handle from IOA: ",
5382 cmd_index);
5383 break;
5384 }
5385
5386 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5387 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5388
5389 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5390
5391 list_move_tail(&ipr_cmd->queue, doneq);
5392
5393 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5394 hrr_queue->hrrq_curr++;
5395 } else {
5396 hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5397 hrr_queue->toggle_bit ^= 1u;
5398 }
5399 num_hrrq++;
5400 if (budget > 0 && num_hrrq >= budget)
5401 break;
5402 }
5403
5404 return num_hrrq;
5405 }
5406
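/**
 * ipr_iopoll - blk_iopoll handler for an HRR queue
 * @iop:	blk_iopoll structure embedded in the hrr queue
 * @budget:	maximum number of completions to process in this poll
 *
 * Processes up to @budget responses with the queue lock held, then completes
 * the collected commands with the lock released. The poll is marked complete
 * when fewer than @budget responses were found.
 *
 * Return value:
 * 	number of completions processed
 **/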
5407 static int ipr_iopoll(struct blk_iopoll *iop, int budget)
5408 {
5409 struct ipr_ioa_cfg *ioa_cfg;
5410 struct ipr_hrr_queue *hrrq;
5411 struct ipr_cmnd *ipr_cmd, *temp;
5412 unsigned long hrrq_flags;
5413 int completed_ops;
5414 LIST_HEAD(doneq);
5415
5416 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5417 ioa_cfg = hrrq->ioa_cfg;
5418
5419 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5420 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5421
5422 if (completed_ops < budget)
5423 blk_iopoll_complete(iop);
5424 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5425
5426 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5427 list_del(&ipr_cmd->queue);
5428 del_timer(&ipr_cmd->timer);
5429 ipr_cmd->fast_done(ipr_cmd);
5430 }
5431
5432 return completed_ops;
5433 }
5434
5435 /**
5436 * ipr_isr - Interrupt service routine
5437 * @irq: irq number
5438 	 * @devp:	pointer to hrr queue struct
5439 *
5440 * Return value:
5441 * IRQ_NONE / IRQ_HANDLED
5442 **/
5443 static irqreturn_t ipr_isr(int irq, void *devp)
5444 {
5445 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5446 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5447 unsigned long hrrq_flags = 0;
5448 u32 int_reg = 0;
5449 int num_hrrq = 0;
5450 int irq_none = 0;
5451 struct ipr_cmnd *ipr_cmd, *temp;
5452 irqreturn_t rc = IRQ_NONE;
5453 LIST_HEAD(doneq);
5454
5455 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5456 /* If interrupts are disabled, ignore the interrupt */
5457 if (!hrrq->allow_interrupts) {
5458 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5459 return IRQ_NONE;
5460 }
5461
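	/*
	 * Keep draining the HRR queue until no new responses show up. When
	 * clear_isr is set, the HRRQ updated interrupt must also be cleared,
	 * with a bounded number of retries before declaring an error.
	 */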
5462 while (1) {
5463 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5464 rc = IRQ_HANDLED;
5465
5466 if (!ioa_cfg->clear_isr)
5467 break;
5468
5469 /* Clear the PCI interrupt */
5470 num_hrrq = 0;
5471 do {
5472 writel(IPR_PCII_HRRQ_UPDATED,
5473 ioa_cfg->regs.clr_interrupt_reg32);
5474 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5475 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5476 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5477
5478 } else if (rc == IRQ_NONE && irq_none == 0) {
5479 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5480 irq_none++;
5481 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5482 int_reg & IPR_PCII_HRRQ_UPDATED) {
5483 ipr_isr_eh(ioa_cfg,
5484 "Error clearing HRRQ: ", num_hrrq);
5485 rc = IRQ_HANDLED;
5486 break;
5487 } else
5488 break;
5489 }
5490
5491 if (unlikely(rc == IRQ_NONE))
5492 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5493
5494 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5495 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5496 list_del(&ipr_cmd->queue);
5497 del_timer(&ipr_cmd->timer);
5498 ipr_cmd->fast_done(ipr_cmd);
5499 }
5500 return rc;
5501 }
5502
5503 /**
5504 	 * ipr_isr_mhrrq - Interrupt service routine for MSI-X vectors
5505 * @irq: irq number
5506 	 * @devp:	pointer to hrr queue struct
5507 *
5508 * Return value:
5509 * IRQ_NONE / IRQ_HANDLED
5510 **/
5511 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5512 {
5513 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5514 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5515 unsigned long hrrq_flags = 0;
5516 struct ipr_cmnd *ipr_cmd, *temp;
5517 irqreturn_t rc = IRQ_NONE;
5518 LIST_HEAD(doneq);
5519
5520 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5521
5522 /* If interrupts are disabled, ignore the interrupt */
5523 if (!hrrq->allow_interrupts) {
5524 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5525 return IRQ_NONE;
5526 }
5527
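	/*
	 * With iopoll enabled on a SIS-64 adapter using multiple MSI-X
	 * vectors, defer completion processing to blk_iopoll; otherwise
	 * process the responses inline while holding the queue lock.
	 */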
5528 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
5529 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5530 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5531 hrrq->toggle_bit) {
5532 if (!blk_iopoll_sched_prep(&hrrq->iopoll))
5533 blk_iopoll_sched(&hrrq->iopoll);
5534 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5535 return IRQ_HANDLED;
5536 }
5537 } else {
5538 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5539 hrrq->toggle_bit)
5540
5541 if (ipr_process_hrrq(hrrq, -1, &doneq))
5542 rc = IRQ_HANDLED;
5543 }
5544
5545 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5546
5547 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5548 list_del(&ipr_cmd->queue);
5549 del_timer(&ipr_cmd->timer);
5550 ipr_cmd->fast_done(ipr_cmd);
5551 }
5552 return rc;
5553 }
5554
5555 /**
5556 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5557 * @ioa_cfg: ioa config struct
5558 * @ipr_cmd: ipr command struct
5559 *
5560 * Return value:
5561 * 0 on success / -1 on failure
5562 **/
5563 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5564 struct ipr_cmnd *ipr_cmd)
5565 {
5566 int i, nseg;
5567 struct scatterlist *sg;
5568 u32 length;
5569 u32 ioadl_flags = 0;
5570 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5571 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5572 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5573
5574 length = scsi_bufflen(scsi_cmd);
5575 if (!length)
5576 return 0;
5577
5578 nseg = scsi_dma_map(scsi_cmd);
5579 if (nseg < 0) {
5580 if (printk_ratelimit())
5581 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5582 return -1;
5583 }
5584
5585 ipr_cmd->dma_use_sg = nseg;
5586
5587 ioarcb->data_transfer_length = cpu_to_be32(length);
5588 ioarcb->ioadl_len =
5589 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5590
5591 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5592 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5593 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5594 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5595 ioadl_flags = IPR_IOADL_FLAGS_READ;
5596
5597 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5598 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5599 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5600 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5601 }
5602
5603 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5604 return 0;
5605 }
5606
5607 /**
5608 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5609 * @ioa_cfg: ioa config struct
5610 * @ipr_cmd: ipr command struct
5611 *
5612 * Return value:
5613 * 0 on success / -1 on failure
5614 **/
5615 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5616 struct ipr_cmnd *ipr_cmd)
5617 {
5618 int i, nseg;
5619 struct scatterlist *sg;
5620 u32 length;
5621 u32 ioadl_flags = 0;
5622 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5623 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5624 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5625
5626 length = scsi_bufflen(scsi_cmd);
5627 if (!length)
5628 return 0;
5629
5630 nseg = scsi_dma_map(scsi_cmd);
5631 if (nseg < 0) {
5632 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5633 return -1;
5634 }
5635
5636 ipr_cmd->dma_use_sg = nseg;
5637
5638 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5639 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5640 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5641 ioarcb->data_transfer_length = cpu_to_be32(length);
5642 ioarcb->ioadl_len =
5643 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5644 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5645 ioadl_flags = IPR_IOADL_FLAGS_READ;
5646 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5647 ioarcb->read_ioadl_len =
5648 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5649 }
5650
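	/*
	 * If the S/G list is small enough to fit in the IOARCB's inline
	 * add_data area, place the descriptors there so the adapter picks
	 * them up with the IOARCB instead of fetching a separate IOADL.
	 */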
5651 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5652 ioadl = ioarcb->u.add_data.u.ioadl;
5653 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5654 offsetof(struct ipr_ioarcb, u.add_data));
5655 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5656 }
5657
5658 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5659 ioadl[i].flags_and_data_len =
5660 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5661 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5662 }
5663
5664 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5665 return 0;
5666 }
5667
5668 /**
5669 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5670 * @scsi_cmd: scsi command struct
5671 *
5672 * Return value:
5673 * task attributes
5674 **/
5675 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5676 {
5677 u8 tag[2];
5678 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5679
5680 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5681 switch (tag[0]) {
5682 case MSG_SIMPLE_TAG:
5683 rc = IPR_FLAGS_LO_SIMPLE_TASK;
5684 break;
5685 case MSG_HEAD_TAG:
5686 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5687 break;
5688 case MSG_ORDERED_TAG:
5689 rc = IPR_FLAGS_LO_ORDERED_TASK;
5690 break;
5691 		}
5692 }
5693
5694 return rc;
5695 }
5696
5697 /**
5698 * ipr_erp_done - Process completion of ERP for a device
5699 * @ipr_cmd: ipr command struct
5700 *
5701 * This function copies the sense buffer into the scsi_cmd
5702 * struct and pushes the scsi_done function.
5703 *
5704 * Return value:
5705 * nothing
5706 **/
5707 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5708 {
5709 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5710 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5711 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5712
5713 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5714 scsi_cmd->result |= (DID_ERROR << 16);
5715 scmd_printk(KERN_ERR, scsi_cmd,
5716 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5717 } else {
5718 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5719 SCSI_SENSE_BUFFERSIZE);
5720 }
5721
5722 if (res) {
5723 if (!ipr_is_naca_model(res))
5724 res->needs_sync_complete = 1;
5725 res->in_erp = 0;
5726 }
5727 scsi_dma_unmap(ipr_cmd->scsi_cmd);
5728 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5729 scsi_cmd->scsi_done(scsi_cmd);
5730 }
5731
5732 /**
5733 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5734 * @ipr_cmd: ipr command struct
5735 *
5736 * Return value:
5737 * none
5738 **/
5739 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5740 {
5741 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5742 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5743 dma_addr_t dma_addr = ipr_cmd->dma_addr;
5744
5745 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5746 ioarcb->data_transfer_length = 0;
5747 ioarcb->read_data_transfer_length = 0;
5748 ioarcb->ioadl_len = 0;
5749 ioarcb->read_ioadl_len = 0;
5750 ioasa->hdr.ioasc = 0;
5751 ioasa->hdr.residual_data_len = 0;
5752
5753 if (ipr_cmd->ioa_cfg->sis64)
5754 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5755 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5756 else {
5757 ioarcb->write_ioadl_addr =
5758 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5759 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5760 }
5761 }
5762
5763 /**
5764 * ipr_erp_request_sense - Send request sense to a device
5765 * @ipr_cmd: ipr command struct
5766 *
5767 * This function sends a request sense to a device as a result
5768 * of a check condition.
5769 *
5770 * Return value:
5771 * nothing
5772 **/
5773 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5774 {
5775 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5776 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5777
5778 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5779 ipr_erp_done(ipr_cmd);
5780 return;
5781 }
5782
5783 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5784
5785 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5786 cmd_pkt->cdb[0] = REQUEST_SENSE;
5787 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5788 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5789 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5790 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5791
5792 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5793 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5794
5795 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5796 IPR_REQUEST_SENSE_TIMEOUT * 2);
5797 }
5798
5799 /**
5800 * ipr_erp_cancel_all - Send cancel all to a device
5801 * @ipr_cmd: ipr command struct
5802 *
5803 * This function sends a cancel all to a device to clear the
5804 * queue. If we are running TCQ on the device, QERR is set to 1,
5805 * which means all outstanding ops have been dropped on the floor.
5806 * Cancel all will return them to us.
5807 *
5808 * Return value:
5809 * nothing
5810 **/
5811 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5812 {
5813 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5814 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5815 struct ipr_cmd_pkt *cmd_pkt;
5816
5817 res->in_erp = 1;
5818
5819 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5820
5821 if (!scsi_get_tag_type(scsi_cmd->device)) {
5822 ipr_erp_request_sense(ipr_cmd);
5823 return;
5824 }
5825
5826 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5827 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5828 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5829
5830 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5831 IPR_CANCEL_ALL_TIMEOUT);
5832 }
5833
5834 /**
5835 * ipr_dump_ioasa - Dump contents of IOASA
5836 * @ioa_cfg: ioa config struct
5837 * @ipr_cmd: ipr command struct
5838 * @res: resource entry struct
5839 *
5840 * This function is invoked by the interrupt handler when ops
5841 * fail. It will log the IOASA if appropriate. Only called
5842 * for GPDD ops.
5843 *
5844 * Return value:
5845 * none
5846 **/
5847 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5848 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5849 {
5850 int i;
5851 u16 data_len;
5852 u32 ioasc, fd_ioasc;
5853 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5854 __be32 *ioasa_data = (__be32 *)ioasa;
5855 int error_index;
5856
5857 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5858 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5859
5860 if (0 == ioasc)
5861 return;
5862
5863 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5864 return;
5865
5866 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5867 error_index = ipr_get_error(fd_ioasc);
5868 else
5869 error_index = ipr_get_error(ioasc);
5870
5871 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5872 /* Don't log an error if the IOA already logged one */
5873 if (ioasa->hdr.ilid != 0)
5874 return;
5875
5876 if (!ipr_is_gscsi(res))
5877 return;
5878
5879 if (ipr_error_table[error_index].log_ioasa == 0)
5880 return;
5881 }
5882
5883 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5884
5885 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5886 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5887 data_len = sizeof(struct ipr_ioasa64);
5888 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
5889 data_len = sizeof(struct ipr_ioasa);
5890
5891 ipr_err("IOASA Dump:\n");
5892
5893 for (i = 0; i < data_len / 4; i += 4) {
5894 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5895 be32_to_cpu(ioasa_data[i]),
5896 be32_to_cpu(ioasa_data[i+1]),
5897 be32_to_cpu(ioasa_data[i+2]),
5898 be32_to_cpu(ioasa_data[i+3]));
5899 }
5900 }
5901
5902 /**
5903 * ipr_gen_sense - Generate SCSI sense data from an IOASA
5904 	 * @ipr_cmd:	ipr command struct
5906 *
5907 * Return value:
5908 * none
5909 **/
5910 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5911 {
5912 u32 failing_lba;
5913 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5914 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5915 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5916 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
5917
5918 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5919
5920 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5921 return;
5922
5923 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5924
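	/*
	 * Use descriptor format sense data (0x72) when the failing LBA does
	 * not fit in 32 bits, so the full address can be reported; otherwise
	 * build fixed format (0x70) sense data.
	 */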
5925 if (ipr_is_vset_device(res) &&
5926 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5927 ioasa->u.vset.failing_lba_hi != 0) {
5928 sense_buf[0] = 0x72;
5929 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5930 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5931 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5932
5933 sense_buf[7] = 12;
5934 sense_buf[8] = 0;
5935 sense_buf[9] = 0x0A;
5936 sense_buf[10] = 0x80;
5937
5938 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5939
5940 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5941 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5942 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5943 sense_buf[15] = failing_lba & 0x000000ff;
5944
5945 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5946
5947 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5948 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5949 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5950 sense_buf[19] = failing_lba & 0x000000ff;
5951 } else {
5952 sense_buf[0] = 0x70;
5953 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5954 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5955 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5956
5957 /* Illegal request */
5958 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
5959 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
5960 sense_buf[7] = 10; /* additional length */
5961
5962 /* IOARCB was in error */
5963 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5964 sense_buf[15] = 0xC0;
5965 else /* Parameter data was invalid */
5966 sense_buf[15] = 0x80;
5967
5968 sense_buf[16] =
5969 ((IPR_FIELD_POINTER_MASK &
5970 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
5971 sense_buf[17] =
5972 (IPR_FIELD_POINTER_MASK &
5973 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
5974 } else {
5975 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5976 if (ipr_is_vset_device(res))
5977 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5978 else
5979 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5980
5981 sense_buf[0] |= 0x80; /* Or in the Valid bit */
5982 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5983 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5984 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5985 sense_buf[6] = failing_lba & 0x000000ff;
5986 }
5987
5988 sense_buf[7] = 6; /* additional length */
5989 }
5990 }
5991 }
5992
5993 /**
5994 * ipr_get_autosense - Copy autosense data to sense buffer
5995 * @ipr_cmd: ipr command struct
5996 *
5997 * This function copies the autosense buffer to the buffer
5998 * in the scsi_cmd, if there is autosense available.
5999 *
6000 * Return value:
6001 * 1 if autosense was available / 0 if not
6002 **/
6003 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6004 {
6005 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6006 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6007
6008 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6009 return 0;
6010
6011 if (ipr_cmd->ioa_cfg->sis64)
6012 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6013 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6014 SCSI_SENSE_BUFFERSIZE));
6015 else
6016 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6017 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6018 SCSI_SENSE_BUFFERSIZE));
6019 return 1;
6020 }
6021
6022 /**
6023 * ipr_erp_start - Process an error response for a SCSI op
6024 * @ioa_cfg: ioa config struct
6025 * @ipr_cmd: ipr command struct
6026 *
6027 * This function determines whether or not to initiate ERP
6028 * on the affected device.
6029 *
6030 * Return value:
6031 * nothing
6032 **/
6033 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6034 struct ipr_cmnd *ipr_cmd)
6035 {
6036 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6037 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6038 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6039 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6040
6041 if (!res) {
6042 ipr_scsi_eh_done(ipr_cmd);
6043 return;
6044 }
6045
6046 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6047 ipr_gen_sense(ipr_cmd);
6048
6049 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6050
6051 switch (masked_ioasc) {
6052 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6053 if (ipr_is_naca_model(res))
6054 scsi_cmd->result |= (DID_ABORT << 16);
6055 else
6056 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6057 break;
6058 case IPR_IOASC_IR_RESOURCE_HANDLE:
6059 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6060 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6061 break;
6062 case IPR_IOASC_HW_SEL_TIMEOUT:
6063 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6064 if (!ipr_is_naca_model(res))
6065 res->needs_sync_complete = 1;
6066 break;
6067 case IPR_IOASC_SYNC_REQUIRED:
6068 if (!res->in_erp)
6069 res->needs_sync_complete = 1;
6070 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6071 break;
6072 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6073 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6074 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6075 break;
6076 case IPR_IOASC_BUS_WAS_RESET:
6077 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6078 /*
6079 * Report the bus reset and ask for a retry. The device
6080 		 * will return a CC/UA on the next command.
6081 */
6082 if (!res->resetting_device)
6083 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6084 scsi_cmd->result |= (DID_ERROR << 16);
6085 if (!ipr_is_naca_model(res))
6086 res->needs_sync_complete = 1;
6087 break;
6088 case IPR_IOASC_HW_DEV_BUS_STATUS:
6089 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6090 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6091 if (!ipr_get_autosense(ipr_cmd)) {
6092 if (!ipr_is_naca_model(res)) {
6093 ipr_erp_cancel_all(ipr_cmd);
6094 return;
6095 }
6096 }
6097 }
6098 if (!ipr_is_naca_model(res))
6099 res->needs_sync_complete = 1;
6100 break;
6101 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6102 break;
6103 default:
6104 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6105 scsi_cmd->result |= (DID_ERROR << 16);
6106 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6107 res->needs_sync_complete = 1;
6108 break;
6109 }
6110
6111 scsi_dma_unmap(ipr_cmd->scsi_cmd);
6112 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6113 scsi_cmd->scsi_done(scsi_cmd);
6114 }
6115
6116 /**
6117 * ipr_scsi_done - mid-layer done function
6118 * @ipr_cmd: ipr command struct
6119 *
6120 * This function is invoked by the interrupt handler for
6121 * ops generated by the SCSI mid-layer
6122 *
6123 * Return value:
6124 * none
6125 **/
6126 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6127 {
6128 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6129 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6130 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6131 unsigned long hrrq_flags;
6132
6133 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6134
6135 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6136 scsi_dma_unmap(scsi_cmd);
6137
6138 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
6139 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6140 scsi_cmd->scsi_done(scsi_cmd);
6141 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
6142 } else {
6143 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
6144 ipr_erp_start(ioa_cfg, ipr_cmd);
6145 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
6146 }
6147 }
6148
6149 /**
6150 * ipr_queuecommand - Queue a mid-layer request
6151 * @shost: scsi host struct
6152 * @scsi_cmd: scsi command struct
6153 *
6154 * This function queues a request generated by the mid-layer.
6155 *
6156 * Return value:
6157 * 0 on success
6158 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6159 * SCSI_MLQUEUE_HOST_BUSY if host is busy
6160 **/
6161 static int ipr_queuecommand(struct Scsi_Host *shost,
6162 struct scsi_cmnd *scsi_cmd)
6163 {
6164 struct ipr_ioa_cfg *ioa_cfg;
6165 struct ipr_resource_entry *res;
6166 struct ipr_ioarcb *ioarcb;
6167 struct ipr_cmnd *ipr_cmd;
6168 unsigned long hrrq_flags, lock_flags;
6169 int rc;
6170 struct ipr_hrr_queue *hrrq;
6171 int hrrq_id;
6172
6173 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6174
6175 scsi_cmd->result = (DID_OK << 16);
6176 res = scsi_cmd->device->hostdata;
6177
6178 if (ipr_is_gata(res) && res->sata_port) {
6179 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6180 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6181 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6182 return rc;
6183 }
6184
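	/*
	 * Choose the HRR queue this command will use; requests are spread
	 * across the configured queues.
	 */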
6185 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6186 hrrq = &ioa_cfg->hrrq[hrrq_id];
6187
6188 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6189 /*
6190 * We are currently blocking all devices due to a host reset
6191 * We have told the host to stop giving us new requests, but
6192 * ERP ops don't count. FIXME
6193 */
6194 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6195 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6196 return SCSI_MLQUEUE_HOST_BUSY;
6197 }
6198
6199 /*
6200 * FIXME - Create scsi_set_host_offline interface
6201 * and the ioa_is_dead check can be removed
6202 */
6203 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6204 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6205 goto err_nodev;
6206 }
6207
6208 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6209 if (ipr_cmd == NULL) {
6210 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6211 return SCSI_MLQUEUE_HOST_BUSY;
6212 }
6213 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6214
6215 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6216 ioarcb = &ipr_cmd->ioarcb;
6217
6218 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6219 ipr_cmd->scsi_cmd = scsi_cmd;
6220 ipr_cmd->done = ipr_scsi_eh_done;
6221
6222 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6223 if (scsi_cmd->underflow == 0)
6224 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6225
6226 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6227 if (ipr_is_gscsi(res) && res->reset_occurred) {
6228 res->reset_occurred = 0;
6229 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6230 }
6231 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6232 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
6233 }
6234
6235 if (scsi_cmd->cmnd[0] >= 0xC0 &&
6236 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6237 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6238 }
6239
6240 if (ioa_cfg->sis64)
6241 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6242 else
6243 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6244
6245 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6246 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6247 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6248 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6249 if (!rc)
6250 scsi_dma_unmap(scsi_cmd);
6251 return SCSI_MLQUEUE_HOST_BUSY;
6252 }
6253
6254 if (unlikely(hrrq->ioa_is_dead)) {
6255 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6256 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6257 scsi_dma_unmap(scsi_cmd);
6258 goto err_nodev;
6259 }
6260
6261 ioarcb->res_handle = res->res_handle;
6262 if (res->needs_sync_complete) {
6263 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6264 res->needs_sync_complete = 0;
6265 }
6266 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6267 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6268 ipr_send_command(ipr_cmd);
6269 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6270 return 0;
6271
6272 err_nodev:
6273 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6274 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6275 scsi_cmd->result = (DID_NO_CONNECT << 16);
6276 scsi_cmd->scsi_done(scsi_cmd);
6277 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6278 return 0;
6279 }
6280
6281 /**
6282 * ipr_ioctl - IOCTL handler
6283 * @sdev: scsi device struct
6284 * @cmd: IOCTL cmd
6285 * @arg: IOCTL arg
6286 *
6287 * Return value:
6288 * 0 on success / other on failure
6289 **/
6290 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6291 {
6292 struct ipr_resource_entry *res;
6293
6294 res = (struct ipr_resource_entry *)sdev->hostdata;
6295 if (res && ipr_is_gata(res)) {
6296 if (cmd == HDIO_GET_IDENTITY)
6297 return -ENOTTY;
6298 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6299 }
6300
6301 return -EINVAL;
6302 }
6303
6304 /**
6305 * ipr_info - Get information about the card/driver
6306 * @scsi_host: scsi host struct
6307 *
6308 * Return value:
6309 * pointer to buffer with description string
6310 **/
6311 static const char *ipr_ioa_info(struct Scsi_Host *host)
6312 {
6313 static char buffer[512];
6314 struct ipr_ioa_cfg *ioa_cfg;
6315 unsigned long lock_flags = 0;
6316
6317 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6318
6319 spin_lock_irqsave(host->host_lock, lock_flags);
6320 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6321 spin_unlock_irqrestore(host->host_lock, lock_flags);
6322
6323 return buffer;
6324 }
6325
6326 static struct scsi_host_template driver_template = {
6327 .module = THIS_MODULE,
6328 .name = "IPR",
6329 .info = ipr_ioa_info,
6330 .ioctl = ipr_ioctl,
6331 .queuecommand = ipr_queuecommand,
6332 .eh_abort_handler = ipr_eh_abort,
6333 .eh_device_reset_handler = ipr_eh_dev_reset,
6334 .eh_host_reset_handler = ipr_eh_host_reset,
6335 .slave_alloc = ipr_slave_alloc,
6336 .slave_configure = ipr_slave_configure,
6337 .slave_destroy = ipr_slave_destroy,
6338 .target_alloc = ipr_target_alloc,
6339 .target_destroy = ipr_target_destroy,
6340 .change_queue_depth = ipr_change_queue_depth,
6341 .change_queue_type = ipr_change_queue_type,
6342 .bios_param = ipr_biosparam,
6343 .can_queue = IPR_MAX_COMMANDS,
6344 .this_id = -1,
6345 .sg_tablesize = IPR_MAX_SGLIST,
6346 .max_sectors = IPR_IOA_MAX_SECTORS,
6347 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6348 .use_clustering = ENABLE_CLUSTERING,
6349 .shost_attrs = ipr_ioa_attrs,
6350 .sdev_attrs = ipr_dev_attrs,
6351 .proc_name = IPR_NAME,
6352 .no_write_same = 1,
6353 };
6354
6355 /**
6356 * ipr_ata_phy_reset - libata phy_reset handler
6357 * @ap: ata port to reset
6358 *
6359 **/
6360 static void ipr_ata_phy_reset(struct ata_port *ap)
6361 {
6362 unsigned long flags;
6363 struct ipr_sata_port *sata_port = ap->private_data;
6364 struct ipr_resource_entry *res = sata_port->res;
6365 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6366 int rc;
6367
6368 ENTER;
6369 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6370 while (ioa_cfg->in_reset_reload) {
6371 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6372 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6373 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6374 }
6375
6376 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6377 goto out_unlock;
6378
6379 rc = ipr_device_reset(ioa_cfg, res);
6380
6381 if (rc) {
6382 ap->link.device[0].class = ATA_DEV_NONE;
6383 goto out_unlock;
6384 }
6385
6386 ap->link.device[0].class = res->ata_class;
6387 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6388 ap->link.device[0].class = ATA_DEV_NONE;
6389
6390 out_unlock:
6391 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6392 LEAVE;
6393 }
6394
6395 /**
6396 * ipr_ata_post_internal - Cleanup after an internal command
6397 * @qc: ATA queued command
6398 *
6399 * Return value:
6400 * none
6401 **/
6402 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6403 {
6404 struct ipr_sata_port *sata_port = qc->ap->private_data;
6405 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6406 struct ipr_cmnd *ipr_cmd;
6407 struct ipr_hrr_queue *hrrq;
6408 unsigned long flags;
6409
6410 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6411 while (ioa_cfg->in_reset_reload) {
6412 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6413 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6414 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6415 }
6416
6417 for_each_hrrq(hrrq, ioa_cfg) {
6418 spin_lock(&hrrq->_lock);
6419 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6420 if (ipr_cmd->qc == qc) {
6421 ipr_device_reset(ioa_cfg, sata_port->res);
6422 break;
6423 }
6424 }
6425 spin_unlock(&hrrq->_lock);
6426 }
6427 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6428 }
6429
6430 /**
6431 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6432 * @regs: destination
6433 * @tf: source ATA taskfile
6434 *
6435 * Return value:
6436 * none
6437 **/
6438 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6439 struct ata_taskfile *tf)
6440 {
6441 regs->feature = tf->feature;
6442 regs->nsect = tf->nsect;
6443 regs->lbal = tf->lbal;
6444 regs->lbam = tf->lbam;
6445 regs->lbah = tf->lbah;
6446 regs->device = tf->device;
6447 regs->command = tf->command;
6448 regs->hob_feature = tf->hob_feature;
6449 regs->hob_nsect = tf->hob_nsect;
6450 regs->hob_lbal = tf->hob_lbal;
6451 regs->hob_lbam = tf->hob_lbam;
6452 regs->hob_lbah = tf->hob_lbah;
6453 regs->ctl = tf->ctl;
6454 }
6455
6456 /**
6457 * ipr_sata_done - done function for SATA commands
6458 * @ipr_cmd: ipr command struct
6459 *
6460 * This function is invoked by the interrupt handler for
6461 * ops generated by the SCSI mid-layer to SATA devices
6462 *
6463 * Return value:
6464 * none
6465 **/
6466 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6467 {
6468 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6469 struct ata_queued_cmd *qc = ipr_cmd->qc;
6470 struct ipr_sata_port *sata_port = qc->ap->private_data;
6471 struct ipr_resource_entry *res = sata_port->res;
6472 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6473
6474 spin_lock(&ipr_cmd->hrrq->_lock);
6475 if (ipr_cmd->ioa_cfg->sis64)
6476 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6477 sizeof(struct ipr_ioasa_gata));
6478 else
6479 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6480 sizeof(struct ipr_ioasa_gata));
6481 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6482
6483 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6484 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6485
6486 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6487 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6488 else
6489 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6490 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6491 spin_unlock(&ipr_cmd->hrrq->_lock);
6492 ata_qc_complete(qc);
6493 }
6494
6495 /**
6496 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6497 * @ipr_cmd: ipr command struct
6498 * @qc: ATA queued command
6499 *
6500 **/
6501 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6502 struct ata_queued_cmd *qc)
6503 {
6504 u32 ioadl_flags = 0;
6505 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6506 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6507 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6508 int len = qc->nbytes;
6509 struct scatterlist *sg;
6510 unsigned int si;
6511 dma_addr_t dma_addr = ipr_cmd->dma_addr;
6512
6513 if (len == 0)
6514 return;
6515
6516 if (qc->dma_dir == DMA_TO_DEVICE) {
6517 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6518 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6519 } else if (qc->dma_dir == DMA_FROM_DEVICE)
6520 ioadl_flags = IPR_IOADL_FLAGS_READ;
6521
6522 ioarcb->data_transfer_length = cpu_to_be32(len);
6523 ioarcb->ioadl_len =
6524 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6525 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6526 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6527
6528 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6529 ioadl64->flags = cpu_to_be32(ioadl_flags);
6530 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6531 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6532
6533 last_ioadl64 = ioadl64;
6534 ioadl64++;
6535 }
6536
6537 if (likely(last_ioadl64))
6538 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6539 }
6540
6541 /**
6542 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6543 * @ipr_cmd: ipr command struct
6544 * @qc: ATA queued command
6545 *
6546 **/
6547 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6548 struct ata_queued_cmd *qc)
6549 {
6550 u32 ioadl_flags = 0;
6551 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6552 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6553 struct ipr_ioadl_desc *last_ioadl = NULL;
6554 int len = qc->nbytes;
6555 struct scatterlist *sg;
6556 unsigned int si;
6557
6558 if (len == 0)
6559 return;
6560
6561 if (qc->dma_dir == DMA_TO_DEVICE) {
6562 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6563 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6564 ioarcb->data_transfer_length = cpu_to_be32(len);
6565 ioarcb->ioadl_len =
6566 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6567 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6568 ioadl_flags = IPR_IOADL_FLAGS_READ;
6569 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6570 ioarcb->read_ioadl_len =
6571 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6572 }
6573
6574 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6575 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6576 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6577
6578 last_ioadl = ioadl;
6579 ioadl++;
6580 }
6581
6582 if (likely(last_ioadl))
6583 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6584 }
6585
6586 /**
6587 * ipr_qc_defer - Get a free ipr_cmd
6588 * @qc: queued command
6589 *
6590 * Return value:
6591 	 * 	0 on success / ATA_DEFER_LINK if the command must be deferred
6592 **/
6593 static int ipr_qc_defer(struct ata_queued_cmd *qc)
6594 {
6595 struct ata_port *ap = qc->ap;
6596 struct ipr_sata_port *sata_port = ap->private_data;
6597 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6598 struct ipr_cmnd *ipr_cmd;
6599 struct ipr_hrr_queue *hrrq;
6600 int hrrq_id;
6601
6602 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6603 hrrq = &ioa_cfg->hrrq[hrrq_id];
6604
6605 qc->lldd_task = NULL;
6606 spin_lock(&hrrq->_lock);
6607 if (unlikely(hrrq->ioa_is_dead)) {
6608 spin_unlock(&hrrq->_lock);
6609 return 0;
6610 }
6611
6612 if (unlikely(!hrrq->allow_cmds)) {
6613 spin_unlock(&hrrq->_lock);
6614 return ATA_DEFER_LINK;
6615 }
6616
6617 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6618 if (ipr_cmd == NULL) {
6619 spin_unlock(&hrrq->_lock);
6620 return ATA_DEFER_LINK;
6621 }
6622
6623 qc->lldd_task = ipr_cmd;
6624 spin_unlock(&hrrq->_lock);
6625 return 0;
6626 }
6627
6628 /**
6629 * ipr_qc_issue - Issue a SATA qc to a device
6630 * @qc: queued command
6631 *
6632 * Return value:
6633 	 * 	0 on success / AC_ERR_* on failure
6634 **/
6635 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6636 {
6637 struct ata_port *ap = qc->ap;
6638 struct ipr_sata_port *sata_port = ap->private_data;
6639 struct ipr_resource_entry *res = sata_port->res;
6640 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6641 struct ipr_cmnd *ipr_cmd;
6642 struct ipr_ioarcb *ioarcb;
6643 struct ipr_ioarcb_ata_regs *regs;
6644
6645 if (qc->lldd_task == NULL)
6646 ipr_qc_defer(qc);
6647
6648 ipr_cmd = qc->lldd_task;
6649 if (ipr_cmd == NULL)
6650 return AC_ERR_SYSTEM;
6651
6652 qc->lldd_task = NULL;
6653 spin_lock(&ipr_cmd->hrrq->_lock);
6654 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6655 ipr_cmd->hrrq->ioa_is_dead)) {
6656 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6657 spin_unlock(&ipr_cmd->hrrq->_lock);
6658 return AC_ERR_SYSTEM;
6659 }
6660
6661 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
6662 ioarcb = &ipr_cmd->ioarcb;
6663
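	/*
	 * The ATA register block lives in different places on the two
	 * interface levels: SIS-64 keeps it in the command's ata_ioadl area
	 * and passes its offset in the IOARCB, while SIS-32 uses the
	 * IOARCB's inline add_data area.
	 */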
6664 if (ioa_cfg->sis64) {
6665 regs = &ipr_cmd->i.ata_ioadl.regs;
6666 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6667 } else
6668 regs = &ioarcb->u.add_data.u.regs;
6669
6670 memset(regs, 0, sizeof(*regs));
6671 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6672
6673 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
6674 ipr_cmd->qc = qc;
6675 ipr_cmd->done = ipr_sata_done;
6676 ipr_cmd->ioarcb.res_handle = res->res_handle;
6677 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6678 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6679 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6680 ipr_cmd->dma_use_sg = qc->n_elem;
6681
6682 if (ioa_cfg->sis64)
6683 ipr_build_ata_ioadl64(ipr_cmd, qc);
6684 else
6685 ipr_build_ata_ioadl(ipr_cmd, qc);
6686
6687 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6688 ipr_copy_sata_tf(regs, &qc->tf);
6689 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6690 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6691
6692 switch (qc->tf.protocol) {
6693 case ATA_PROT_NODATA:
6694 case ATA_PROT_PIO:
6695 break;
6696
6697 case ATA_PROT_DMA:
6698 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6699 break;
6700
6701 case ATAPI_PROT_PIO:
6702 case ATAPI_PROT_NODATA:
6703 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6704 break;
6705
6706 case ATAPI_PROT_DMA:
6707 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6708 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6709 break;
6710
6711 default:
6712 WARN_ON(1);
6713 spin_unlock(&ipr_cmd->hrrq->_lock);
6714 return AC_ERR_INVALID;
6715 }
6716
6717 ipr_send_command(ipr_cmd);
6718 spin_unlock(&ipr_cmd->hrrq->_lock);
6719
6720 return 0;
6721 }
6722
6723 /**
6724 * ipr_qc_fill_rtf - Read result TF
6725 * @qc: ATA queued command
6726 *
6727 * Return value:
6728 * true
6729 **/
6730 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6731 {
6732 struct ipr_sata_port *sata_port = qc->ap->private_data;
6733 struct ipr_ioasa_gata *g = &sata_port->ioasa;
6734 struct ata_taskfile *tf = &qc->result_tf;
6735
6736 tf->feature = g->error;
6737 tf->nsect = g->nsect;
6738 tf->lbal = g->lbal;
6739 tf->lbam = g->lbam;
6740 tf->lbah = g->lbah;
6741 tf->device = g->device;
6742 tf->command = g->status;
6743 tf->hob_nsect = g->hob_nsect;
6744 tf->hob_lbal = g->hob_lbal;
6745 tf->hob_lbam = g->hob_lbam;
6746 tf->hob_lbah = g->hob_lbah;
6747
6748 return true;
6749 }
6750
6751 static struct ata_port_operations ipr_sata_ops = {
6752 .phy_reset = ipr_ata_phy_reset,
6753 .hardreset = ipr_sata_reset,
6754 .post_internal_cmd = ipr_ata_post_internal,
6755 .qc_prep = ata_noop_qc_prep,
6756 .qc_defer = ipr_qc_defer,
6757 .qc_issue = ipr_qc_issue,
6758 .qc_fill_rtf = ipr_qc_fill_rtf,
6759 .port_start = ata_sas_port_start,
6760 .port_stop = ata_sas_port_stop
6761 };
6762
6763 static struct ata_port_info sata_port_info = {
6764 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
6765 .pio_mask = ATA_PIO4_ONLY,
6766 .mwdma_mask = ATA_MWDMA2,
6767 .udma_mask = ATA_UDMA6,
6768 .port_ops = &ipr_sata_ops
6769 };
6770
6771 #ifdef CONFIG_PPC_PSERIES
6772 static const u16 ipr_blocked_processors[] = {
6773 PVR_NORTHSTAR,
6774 PVR_PULSAR,
6775 PVR_POWER4,
6776 PVR_ICESTAR,
6777 PVR_SSTAR,
6778 PVR_POWER4p,
6779 PVR_630,
6780 PVR_630p
6781 };
6782
6783 /**
6784 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6785 * @ioa_cfg: ioa cfg struct
6786 *
6787 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6788 * certain pSeries hardware. This function determines if the given
6789 	 * adapter is in one of these configurations or not.
6790 *
6791 * Return value:
6792 * 1 if adapter is not supported / 0 if adapter is supported
6793 **/
6794 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6795 {
6796 int i;
6797
6798 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6799 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6800 if (pvr_version_is(ipr_blocked_processors[i]))
6801 return 1;
6802 }
6803 }
6804 return 0;
6805 }
6806 #else
6807 #define ipr_invalid_adapter(ioa_cfg) 0
6808 #endif
6809
6810 /**
6811 * ipr_ioa_bringdown_done - IOA bring down completion.
6812 * @ipr_cmd: ipr command struct
6813 *
6814 * This function processes the completion of an adapter bring down.
6815 * It wakes any reset sleepers.
6816 *
6817 * Return value:
6818 * IPR_RC_JOB_RETURN
6819 **/
6820 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6821 {
6822 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6823 int i;
6824
6825 ENTER;
6826 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
6827 ipr_trace;
6828 spin_unlock_irq(ioa_cfg->host->host_lock);
6829 scsi_unblock_requests(ioa_cfg->host);
6830 spin_lock_irq(ioa_cfg->host->host_lock);
6831 }
6832
6833 ioa_cfg->in_reset_reload = 0;
6834 ioa_cfg->reset_retries = 0;
6835 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6836 spin_lock(&ioa_cfg->hrrq[i]._lock);
6837 ioa_cfg->hrrq[i].ioa_is_dead = 1;
6838 spin_unlock(&ioa_cfg->hrrq[i]._lock);
6839 }
6840 wmb();
6841
6842 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6843 wake_up_all(&ioa_cfg->reset_wait_q);
6844 LEAVE;
6845
6846 return IPR_RC_JOB_RETURN;
6847 }
6848
6849 /**
6850 * ipr_ioa_reset_done - IOA reset completion.
6851 * @ipr_cmd: ipr command struct
6852 *
6853 * This function processes the completion of an adapter reset.
6854 * It schedules any necessary mid-layer add/removes and
6855 * wakes any reset sleepers.
6856 *
6857 * Return value:
6858 * IPR_RC_JOB_RETURN
6859 **/
6860 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6861 {
6862 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6863 struct ipr_resource_entry *res;
6864 struct ipr_hostrcb *hostrcb, *temp;
6865 int i = 0, j;
6866
6867 ENTER;
6868 ioa_cfg->in_reset_reload = 0;
6869 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
6870 spin_lock(&ioa_cfg->hrrq[j]._lock);
6871 ioa_cfg->hrrq[j].allow_cmds = 1;
6872 spin_unlock(&ioa_cfg->hrrq[j]._lock);
6873 }
6874 wmb();
6875 ioa_cfg->reset_cmd = NULL;
6876 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6877
6878 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6879 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6880 ipr_trace;
6881 break;
6882 }
6883 }
6884 schedule_work(&ioa_cfg->work_q);
6885
6886 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6887 list_del(&hostrcb->queue);
6888 if (i++ < IPR_NUM_LOG_HCAMS)
6889 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6890 else
6891 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6892 }
6893
6894 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6895 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6896
6897 ioa_cfg->reset_retries = 0;
6898 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6899 wake_up_all(&ioa_cfg->reset_wait_q);
6900
6901 spin_unlock(ioa_cfg->host->host_lock);
6902 scsi_unblock_requests(ioa_cfg->host);
6903 spin_lock(ioa_cfg->host->host_lock);
6904
6905 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6906 scsi_block_requests(ioa_cfg->host);
6907
6908 LEAVE;
6909 return IPR_RC_JOB_RETURN;
6910 }
6911
6912 /**
6913 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6914 * @supported_dev: supported device struct
6915 * @vpids: vendor product id struct
6916 *
6917 * Return value:
6918 * none
6919 **/
6920 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6921 struct ipr_std_inq_vpids *vpids)
6922 {
6923 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6924 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6925 supported_dev->num_records = 1;
6926 supported_dev->data_length =
6927 cpu_to_be16(sizeof(struct ipr_supported_device));
6928 supported_dev->reserved = 0;
6929 }
6930
6931 /**
6932 * ipr_set_supported_devs - Send Set Supported Devices for a device
6933 * @ipr_cmd: ipr command struct
6934 *
6935 * This function sends a Set Supported Devices to the adapter
6936 *
6937 * Return value:
6938 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6939 **/
6940 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6941 {
6942 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6943 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6944 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6945 struct ipr_resource_entry *res = ipr_cmd->u.res;
6946
6947 ipr_cmd->job_step = ipr_ioa_reset_done;
6948
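	/*
	 * Resume scanning the used resource list from the last device
	 * processed. One Set Supported Devices command is issued per
	 * invocation and, on non-SIS64 adapters, job_step is pointed back
	 * at this function so each remaining SCSI disk is handled in turn.
	 */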
6949 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6950 if (!ipr_is_scsi_disk(res))
6951 continue;
6952
6953 ipr_cmd->u.res = res;
6954 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6955
6956 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6957 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6958 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6959
6960 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6961 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
6962 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6963 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6964
6965 ipr_init_ioadl(ipr_cmd,
6966 ioa_cfg->vpd_cbs_dma +
6967 offsetof(struct ipr_misc_cbs, supp_dev),
6968 sizeof(struct ipr_supported_device),
6969 IPR_IOADL_FLAGS_WRITE_LAST);
6970
6971 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6972 IPR_SET_SUP_DEVICE_TIMEOUT);
6973
6974 if (!ioa_cfg->sis64)
6975 ipr_cmd->job_step = ipr_set_supported_devs;
6976 LEAVE;
6977 return IPR_RC_JOB_RETURN;
6978 }
6979
6980 LEAVE;
6981 return IPR_RC_JOB_CONTINUE;
6982 }
6983
6984 /**
6985 * ipr_get_mode_page - Locate specified mode page
6986 * @mode_pages: mode page buffer
6987 * @page_code: page code to find
6988 * @len: minimum required length for mode page
6989 *
6990 * Return value:
6991 * pointer to mode page / NULL on failure
6992 **/
6993 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6994 u32 page_code, u32 len)
6995 {
6996 struct ipr_mode_page_hdr *mode_hdr;
6997 u32 page_length;
6998 u32 length;
6999
7000 if (!mode_pages || (mode_pages->hdr.length == 0))
7001 return NULL;
7002
7003 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7004 mode_hdr = (struct ipr_mode_page_hdr *)
7005 (mode_pages->data + mode_pages->hdr.block_desc_len);
7006
7007 while (length) {
7008 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7009 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7010 return mode_hdr;
7011 break;
7012 } else {
7013 page_length = (sizeof(struct ipr_mode_page_hdr) +
7014 mode_hdr->page_length);
7015 length -= page_length;
7016 mode_hdr = (struct ipr_mode_page_hdr *)
7017 ((unsigned long)mode_hdr + page_length);
7018 }
7019 }
7020 return NULL;
7021 }
7022
7023 /**
7024 * ipr_check_term_power - Check for term power errors
7025 * @ioa_cfg: ioa config struct
7026 * @mode_pages: IOAFP mode pages buffer
7027 *
7028 * Check the IOAFP's mode page 28 for term power errors
7029 *
7030 * Return value:
7031 * nothing
7032 **/
7033 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7034 struct ipr_mode_pages *mode_pages)
7035 {
7036 int i;
7037 int entry_length;
7038 struct ipr_dev_bus_entry *bus;
7039 struct ipr_mode_page28 *mode_page;
7040
7041 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7042 sizeof(struct ipr_mode_page28));
7043
7044 entry_length = mode_page->entry_length;
7045
7046 bus = mode_page->bus;
7047
7048 for (i = 0; i < mode_page->num_entries; i++) {
7049 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7050 dev_err(&ioa_cfg->pdev->dev,
7051 "Term power is absent on scsi bus %d\n",
7052 bus->res_addr.bus);
7053 }
7054
7055 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7056 }
7057 }
7058
7059 /**
7060 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7061 * @ioa_cfg: ioa config struct
7062 *
7063 * Looks through the config table checking for SES devices. If
7064 * the SES device is in the SES table indicating a maximum SCSI
7065 * bus speed, the speed is limited for the bus.
7066 *
7067 * Return value:
7068 * none
7069 **/
7070 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7071 {
7072 u32 max_xfer_rate;
7073 int i;
7074
7075 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7076 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7077 ioa_cfg->bus_attr[i].bus_width);
7078
7079 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7080 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7081 }
7082 }
7083
7084 /**
7085 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7086 * @ioa_cfg: ioa config struct
7087 * @mode_pages: mode page 28 buffer
7088 *
7089 * Updates mode page 28 based on driver configuration
7090 *
7091 * Return value:
7092 * none
7093 **/
7094 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7095 struct ipr_mode_pages *mode_pages)
7096 {
7097 int i, entry_length;
7098 struct ipr_dev_bus_entry *bus;
7099 struct ipr_bus_attributes *bus_attr;
7100 struct ipr_mode_page28 *mode_page;
7101
7102 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7103 sizeof(struct ipr_mode_page28));
7104
7105 entry_length = mode_page->entry_length;
7106
7107 /* Loop for each device bus entry */
7108 for (i = 0, bus = mode_page->bus;
7109 i < mode_page->num_entries;
7110 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7111 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7112 dev_err(&ioa_cfg->pdev->dev,
7113 "Invalid resource address reported: 0x%08X\n",
7114 IPR_GET_PHYS_LOC(bus->res_addr));
7115 continue;
7116 }
7117
7118 bus_attr = &ioa_cfg->bus_attr[i];
7119 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7120 bus->bus_width = bus_attr->bus_width;
7121 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7122 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7123 if (bus_attr->qas_enabled)
7124 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7125 else
7126 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7127 }
7128 }
7129
7130 /**
7131 * ipr_build_mode_select - Build a mode select command
7132 * @ipr_cmd: ipr command struct
7133 * @res_handle: resource handle to send command to
7134 	 * @parm:	Byte 2 of the Mode Select command
7135 * @dma_addr: DMA buffer address
7136 * @xfer_len: data transfer length
7137 *
7138 * Return value:
7139 * none
7140 **/
7141 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7142 __be32 res_handle, u8 parm,
7143 dma_addr_t dma_addr, u8 xfer_len)
7144 {
7145 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7146
7147 ioarcb->res_handle = res_handle;
7148 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7149 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7150 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7151 ioarcb->cmd_pkt.cdb[1] = parm;
7152 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7153
7154 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7155 }
7156
7157 /**
7158 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7159 * @ipr_cmd: ipr command struct
7160 *
7161 * This function sets up the SCSI bus attributes and sends
7162 * a Mode Select for Page 28 to activate them.
7163 *
7164 * Return value:
7165 * IPR_RC_JOB_RETURN
7166 **/
7167 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7168 {
7169 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7170 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7171 int length;
7172
7173 ENTER;
7174 ipr_scsi_bus_speed_limit(ioa_cfg);
7175 ipr_check_term_power(ioa_cfg, mode_pages);
7176 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
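/*
 * The mode data length byte excludes itself, so add one to get the
 * full transfer length; the field is reserved for MODE SELECT, so
 * clear it before sending the data back to the adapter.
 */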
7177 length = mode_pages->hdr.length + 1;
7178 mode_pages->hdr.length = 0;
7179
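/* parm 0x11 sets the page format (PF) and save pages (SP) bits in CDB byte 1 */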
7180 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7181 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7182 length);
7183
7184 ipr_cmd->job_step = ipr_set_supported_devs;
7185 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7186 struct ipr_resource_entry, queue);
7187 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7188
7189 LEAVE;
7190 return IPR_RC_JOB_RETURN;
7191 }
7192
7193 /**
7194 * ipr_build_mode_sense - Builds a mode sense command
7195 * @ipr_cmd: ipr command struct
7196 * @res_handle: resource handle to send command to
7197 * @parm: Byte 2 of mode sense command
7198 * @dma_addr: DMA address of mode sense buffer
7199 * @xfer_len: Size of DMA buffer
7200 *
7201 * Return value:
7202 * none
7203 **/
7204 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7205 __be32 res_handle,
7206 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7207 {
7208 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7209
7210 ioarcb->res_handle = res_handle;
7211 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7212 ioarcb->cmd_pkt.cdb[2] = parm;
7213 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7214 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7215
7216 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7217 }
7218
7219 /**
7220 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7221 * @ipr_cmd: ipr command struct
7222 *
7223 * This function handles the failure of an IOA bringup command.
7224 *
7225 * Return value:
7226 * IPR_RC_JOB_RETURN
7227 **/
7228 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7229 {
7230 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7231 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7232
7233 dev_err(&ioa_cfg->pdev->dev,
7234 "0x%02X failed with IOASC: 0x%08X\n",
7235 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7236
7237 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7238 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7239 return IPR_RC_JOB_RETURN;
7240 }
7241
7242 /**
7243 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7244 * @ipr_cmd: ipr command struct
7245 *
7246 * This function handles the failure of a Mode Sense to the IOAFP.
7247 * Some adapters do not handle all mode pages.
7248 *
7249 * Return value:
7250 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7251 **/
7252 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7253 {
7254 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7255 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7256
7257 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7258 ipr_cmd->job_step = ipr_set_supported_devs;
7259 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7260 struct ipr_resource_entry, queue);
7261 return IPR_RC_JOB_CONTINUE;
7262 }
7263
7264 return ipr_reset_cmd_failed(ipr_cmd);
7265 }
7266
7267 /**
7268 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7269 * @ipr_cmd: ipr command struct
7270 *
7271 * This function sends a Page 28 mode sense to the IOA to
7272 * retrieve SCSI bus attributes.
7273 *
7274 * Return value:
7275 * IPR_RC_JOB_RETURN
7276 **/
7277 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7278 {
7279 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7280
7281 ENTER;
7282 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7283 0x28, ioa_cfg->vpd_cbs_dma +
7284 offsetof(struct ipr_misc_cbs, mode_pages),
7285 sizeof(struct ipr_mode_pages));
7286
7287 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7288 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7289
7290 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7291
7292 LEAVE;
7293 return IPR_RC_JOB_RETURN;
7294 }
7295
7296 /**
7297 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7298 * @ipr_cmd: ipr command struct
7299 *
7300 * This function enables dual IOA RAID support if possible.
7301 *
7302 * Return value:
7303 * IPR_RC_JOB_RETURN
7304 **/
7305 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7306 {
7307 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7308 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7309 struct ipr_mode_page24 *mode_page;
7310 int length;
7311
7312 ENTER;
7313 mode_page = ipr_get_mode_page(mode_pages, 0x24,
7314 sizeof(struct ipr_mode_page24));
7315
7316 if (mode_page)
7317 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7318
7319 length = mode_pages->hdr.length + 1;
7320 mode_pages->hdr.length = 0;
7321
7322 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7323 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7324 length);
7325
7326 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7327 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7328
7329 LEAVE;
7330 return IPR_RC_JOB_RETURN;
7331 }
7332
7333 /**
7334 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7335 * @ipr_cmd: ipr command struct
7336 *
7337 * This function handles the failure of a Mode Sense to the IOAFP.
7338 * Some adapters do not handle all mode pages.
7339 *
7340 * Return value:
7341 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7342 **/
7343 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7344 {
7345 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7346
7347 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7348 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7349 return IPR_RC_JOB_CONTINUE;
7350 }
7351
7352 return ipr_reset_cmd_failed(ipr_cmd);
7353 }
7354
7355 /**
7356 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7357 * @ipr_cmd: ipr command struct
7358 *
7359 * This function sends a mode sense to the IOA to retrieve
7360 * the IOA Advanced Function Control mode page.
7361 *
7362 * Return value:
7363 * IPR_RC_JOB_RETURN
7364 **/
7365 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7366 {
7367 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7368
7369 ENTER;
7370 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7371 0x24, ioa_cfg->vpd_cbs_dma +
7372 offsetof(struct ipr_misc_cbs, mode_pages),
7373 sizeof(struct ipr_mode_pages));
7374
7375 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7376 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7377
7378 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7379
7380 LEAVE;
7381 return IPR_RC_JOB_RETURN;
7382 }
7383
7384 /**
7385 * ipr_init_res_table - Initialize the resource table
7386 * @ipr_cmd: ipr command struct
7387 *
7388 * This function looks through the existing resource table, comparing
7389 * it with the config table. This function will take care of old/new
7390 * devices and schedule adding/removing them from the mid-layer
7391 * as appropriate.
7392 *
7393 * Return value:
7394 * IPR_RC_JOB_CONTINUE
7395 **/
7396 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7397 {
7398 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7399 struct ipr_resource_entry *res, *temp;
7400 struct ipr_config_table_entry_wrapper cfgtew;
7401 int entries, found, flag, i;
7402 LIST_HEAD(old_res);
7403
7404 ENTER;
7405 if (ioa_cfg->sis64)
7406 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7407 else
7408 flag = ioa_cfg->u.cfg_table->hdr.flags;
7409
7410 if (flag & IPR_UCODE_DOWNLOAD_REQ)
7411 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7412
7413 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7414 list_move_tail(&res->queue, &old_res);
7415
7416 if (ioa_cfg->sis64)
7417 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7418 else
7419 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7420
7421 for (i = 0; i < entries; i++) {
7422 if (ioa_cfg->sis64)
7423 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7424 else
7425 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7426 found = 0;
7427
7428 list_for_each_entry_safe(res, temp, &old_res, queue) {
7429 if (ipr_is_same_device(res, &cfgtew)) {
7430 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7431 found = 1;
7432 break;
7433 }
7434 }
7435
7436 if (!found) {
7437 if (list_empty(&ioa_cfg->free_res_q)) {
7438 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7439 break;
7440 }
7441
7442 found = 1;
7443 res = list_entry(ioa_cfg->free_res_q.next,
7444 struct ipr_resource_entry, queue);
7445 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7446 ipr_init_res_entry(res, &cfgtew);
7447 res->add_to_ml = 1;
7448 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7449 res->sdev->allow_restart = 1;
7450
7451 if (found)
7452 ipr_update_res_entry(res, &cfgtew);
7453 }
7454
7455 list_for_each_entry_safe(res, temp, &old_res, queue) {
7456 if (res->sdev) {
7457 res->del_from_ml = 1;
7458 res->res_handle = IPR_INVALID_RES_HANDLE;
7459 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7460 }
7461 }
7462
7463 list_for_each_entry_safe(res, temp, &old_res, queue) {
7464 ipr_clear_res_target(res);
7465 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7466 }
7467
7468 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7469 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7470 else
7471 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7472
7473 LEAVE;
7474 return IPR_RC_JOB_CONTINUE;
7475 }
7476
7477 /**
7478 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7479 * @ipr_cmd: ipr command struct
7480 *
7481 * This function sends a Query IOA Configuration command
7482 * to the adapter to retrieve the IOA configuration table.
7483 *
7484 * Return value:
7485 * IPR_RC_JOB_RETURN
7486 **/
7487 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7488 {
7489 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7490 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7491 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7492 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7493
7494 ENTER;
7495 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7496 ioa_cfg->dual_raid = 1;
7497 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7498 ucode_vpd->major_release, ucode_vpd->card_type,
7499 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7500 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7501 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7502
7503 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
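/* CDB bytes 6-8 carry the config table size, most significant byte first */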
7504 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7505 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7506 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7507
7508 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7509 IPR_IOADL_FLAGS_READ_LAST);
7510
7511 ipr_cmd->job_step = ipr_init_res_table;
7512
7513 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7514
7515 LEAVE;
7516 return IPR_RC_JOB_RETURN;
7517 }
7518
7519 /**
7520 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7521 * @ipr_cmd: ipr command struct
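* @flags: CDB flags byte (EVPD bit for VPD pages)
* @page: inquiry page code
* @dma_addr: DMA address of the inquiry buffer
* @xfer_len: size of the inquiry buffer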
7522 *
7523 * This utility function sends an inquiry to the adapter.
7524 *
7525 * Return value:
7526 * none
7527 **/
7528 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7529 dma_addr_t dma_addr, u8 xfer_len)
7530 {
7531 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7532
7533 ENTER;
7534 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7535 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7536
7537 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7538 ioarcb->cmd_pkt.cdb[1] = flags;
7539 ioarcb->cmd_pkt.cdb[2] = page;
7540 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7541
7542 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7543
7544 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7545 LEAVE;
7546 }
7547
7548 /**
7549 * ipr_inquiry_page_supported - Is the given inquiry page supported
7550 * @page0: inquiry page 0 buffer
7551 * @page: page code.
7552 *
7553 * This function determines if the specified inquiry page is supported.
7554 *
7555 * Return value:
7556 * 1 if page is supported / 0 if not
7557 **/
7558 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7559 {
7560 int i;
7561
7562 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7563 if (page0->page[i] == page)
7564 return 1;
7565
7566 return 0;
7567 }
7568
7569 /**
7570 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7571 * @ipr_cmd: ipr command struct
7572 *
7573 * This function sends a Page 0xD0 inquiry to the adapter
7574 * to retrieve adapter capabilities.
7575 *
7576 * Return value:
7577 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7578 **/
7579 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7580 {
7581 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7582 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7583 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7584
7585 ENTER;
7586 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7587 memset(cap, 0, sizeof(*cap));
7588
7589 if (ipr_inquiry_page_supported(page0, 0xD0)) {
7590 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7591 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7592 sizeof(struct ipr_inquiry_cap));
7593 return IPR_RC_JOB_RETURN;
7594 }
7595
7596 LEAVE;
7597 return IPR_RC_JOB_CONTINUE;
7598 }
7599
7600 /**
7601 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7602 * @ipr_cmd: ipr command struct
7603 *
7604 * This function sends a Page 3 inquiry to the adapter
7605 * to retrieve software VPD information.
7606 *
7607 * Return value:
7608 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7609 **/
7610 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7611 {
7612 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7613
7614 ENTER;
7615
7616 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7617
7618 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7619 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7620 sizeof(struct ipr_inquiry_page3));
7621
7622 LEAVE;
7623 return IPR_RC_JOB_RETURN;
7624 }
7625
7626 /**
7627 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7628 * @ipr_cmd: ipr command struct
7629 *
7630 * This function sends a Page 0 inquiry to the adapter
7631 * to retrieve supported inquiry pages.
7632 *
7633 * Return value:
7634 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7635 **/
7636 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7637 {
7638 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7639 char type[5];
7640
7641 ENTER;
7642
7643 /* Grab the type out of the VPD and store it away */
7644 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7645 type[4] = '\0';
7646 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7647
7648 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
7649
7650 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7651 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7652 sizeof(struct ipr_inquiry_page0));
7653
7654 LEAVE;
7655 return IPR_RC_JOB_RETURN;
7656 }
7657
7658 /**
7659 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7660 * @ipr_cmd: ipr command struct
7661 *
7662 * This function sends a standard inquiry to the adapter.
7663 *
7664 * Return value:
7665 * IPR_RC_JOB_RETURN
7666 **/
7667 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7668 {
7669 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7670
7671 ENTER;
7672 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7673
7674 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7675 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7676 sizeof(struct ipr_ioa_vpd));
7677
7678 LEAVE;
7679 return IPR_RC_JOB_RETURN;
7680 }
7681
7682 /**
7683 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7684 * @ipr_cmd: ipr command struct
7685 *
7686 * This function sends an Identify Host Request Response Queue
7687 * command to establish the HRRQ with the adapter.
7688 *
7689 * Return value:
7690 * IPR_RC_JOB_RETURN
7691 **/
7692 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7693 {
7694 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7695 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7696 struct ipr_hrr_queue *hrrq;
7697
7698 ENTER;
7699 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7700 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7701
7702 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7703 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
7704
7705 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7706 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7707
7708 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7709 if (ioa_cfg->sis64)
7710 ioarcb->cmd_pkt.cdb[1] = 0x1;
7711
7712 if (ioa_cfg->nvectors == 1)
7713 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7714 else
7715 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7716
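/*
 * CDB bytes 2-5 carry the low 32 bits of the HRRQ DMA address and
 * bytes 7-8 the queue size in bytes; bytes 10-13 below carry the
 * upper 32 bits on SIS-64 adapters.
 */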
7717 ioarcb->cmd_pkt.cdb[2] =
7718 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7719 ioarcb->cmd_pkt.cdb[3] =
7720 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7721 ioarcb->cmd_pkt.cdb[4] =
7722 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7723 ioarcb->cmd_pkt.cdb[5] =
7724 ((u64) hrrq->host_rrq_dma) & 0xff;
7725 ioarcb->cmd_pkt.cdb[7] =
7726 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7727 ioarcb->cmd_pkt.cdb[8] =
7728 (sizeof(u32) * hrrq->size) & 0xff;
7729
7730 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7731 ioarcb->cmd_pkt.cdb[9] =
7732 ioa_cfg->identify_hrrq_index;
7733
7734 if (ioa_cfg->sis64) {
7735 ioarcb->cmd_pkt.cdb[10] =
7736 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7737 ioarcb->cmd_pkt.cdb[11] =
7738 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7739 ioarcb->cmd_pkt.cdb[12] =
7740 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7741 ioarcb->cmd_pkt.cdb[13] =
7742 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7743 }
7744
7745 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7746 ioarcb->cmd_pkt.cdb[14] =
7747 ioa_cfg->identify_hrrq_index;
7748
7749 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7750 IPR_INTERNAL_TIMEOUT);
7751
7752 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7753 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7754
7755 LEAVE;
7756 return IPR_RC_JOB_RETURN;
7757 }
7758
7759 LEAVE;
7760 return IPR_RC_JOB_CONTINUE;
7761 }
7762
7763 /**
7764 * ipr_reset_timer_done - Adapter reset timer function
7765 * @ipr_cmd: ipr command struct
7766 *
7767 * Description: This function is used in adapter reset processing
7768 * for timing events. If the reset_cmd pointer in the IOA
7769 * config struct is not this adapter's we are doing nested
7770 * resets and fail_all_ops will take care of freeing the
7771 * command block.
7772 *
7773 * Return value:
7774 * none
7775 **/
7776 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7777 {
7778 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7779 unsigned long lock_flags = 0;
7780
7781 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7782
7783 if (ioa_cfg->reset_cmd == ipr_cmd) {
7784 list_del(&ipr_cmd->queue);
7785 ipr_cmd->done(ipr_cmd);
7786 }
7787
7788 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7789 }
7790
7791 /**
7792 * ipr_reset_start_timer - Start a timer for adapter reset job
7793 * @ipr_cmd: ipr command struct
7794 * @timeout: timeout value
7795 *
7796 * Description: This function is used in adapter reset processing
7797 * for timing events. If the reset_cmd pointer in the IOA
7798 * config struct does not point to this command, we are doing nested
7799 * resets and fail_all_ops will take care of freeing the
7800 * command block.
7801 *
7802 * Return value:
7803 * none
7804 **/
7805 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7806 unsigned long timeout)
7807 {
7808
7809 ENTER;
7810 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7811 ipr_cmd->done = ipr_reset_ioa_job;
7812
7813 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7814 ipr_cmd->timer.expires = jiffies + timeout;
7815 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7816 add_timer(&ipr_cmd->timer);
7817 }
7818
7819 /**
7820 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7821 * @ioa_cfg: ioa cfg struct
7822 *
7823 * Return value:
7824 * nothing
7825 **/
7826 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7827 {
7828 struct ipr_hrr_queue *hrrq;
7829
7830 for_each_hrrq(hrrq, ioa_cfg) {
7831 spin_lock(&hrrq->_lock);
7832 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7833
7834 /* Initialize Host RRQ pointers */
7835 hrrq->hrrq_start = hrrq->host_rrq;
7836 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7837 hrrq->hrrq_curr = hrrq->hrrq_start;
7838 hrrq->toggle_bit = 1;
7839 spin_unlock(&hrrq->_lock);
7840 }
7841 wmb();
7842
7843 ioa_cfg->identify_hrrq_index = 0;
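/*
 * When multiple HRRQs are in use, queue 0 is reserved for internal
 * commands, so start distributing I/O at queue 1.
 */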
7844 if (ioa_cfg->hrrq_num == 1)
7845 atomic_set(&ioa_cfg->hrrq_index, 0);
7846 else
7847 atomic_set(&ioa_cfg->hrrq_index, 1);
7848
7849 /* Zero out config table */
7850 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7851 }
7852
7853 /**
7854 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7855 * @ipr_cmd: ipr command struct
7856 *
7857 * Return value:
7858 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7859 **/
7860 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7861 {
7862 unsigned long stage, stage_time;
7863 u32 feedback;
7864 volatile u32 int_reg;
7865 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7866 u64 maskval = 0;
7867
7868 feedback = readl(ioa_cfg->regs.init_feedback_reg);
7869 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7870 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7871
7872 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7873
7874 /* sanity check the stage_time value */
7875 if (stage_time == 0)
7876 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7877 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7878 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7879 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7880 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7881
7882 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7883 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7884 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7885 stage_time = ioa_cfg->transop_timeout;
7886 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7887 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7888 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7889 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7890 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
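/*
 * Mask both the IPL stage change interrupt (upper 32 bits) and the
 * transition-to-operational interrupt (lower 32 bits) with a single
 * 64-bit write.
 */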
7891 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7892 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7893 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7894 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7895 return IPR_RC_JOB_CONTINUE;
7896 }
7897 }
7898
7899 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7900 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7901 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7902 ipr_cmd->done = ipr_reset_ioa_job;
7903 add_timer(&ipr_cmd->timer);
7904
7905 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7906
7907 return IPR_RC_JOB_RETURN;
7908 }
7909
7910 /**
7911 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7912 * @ipr_cmd: ipr command struct
7913 *
7914 * This function reinitializes some control blocks and
7915 * enables destructive diagnostics on the adapter.
7916 *
7917 * Return value:
7918 * IPR_RC_JOB_RETURN
7919 **/
7920 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7921 {
7922 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7923 volatile u32 int_reg;
7924 volatile u64 maskval;
7925 int i;
7926
7927 ENTER;
7928 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7929 ipr_init_ioa_mem(ioa_cfg);
7930
7931 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7932 spin_lock(&ioa_cfg->hrrq[i]._lock);
7933 ioa_cfg->hrrq[i].allow_interrupts = 1;
7934 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7935 }
7936 wmb();
7937 if (ioa_cfg->sis64) {
7938 /* Set the adapter to the correct endian mode. */
7939 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7940 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7941 }
7942
7943 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7944
7945 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7946 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
7947 ioa_cfg->regs.clr_interrupt_mask_reg32);
7948 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7949 return IPR_RC_JOB_CONTINUE;
7950 }
7951
7952 /* Enable destructive diagnostics on IOA */
7953 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7954
7955 if (ioa_cfg->sis64) {
7956 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7957 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7958 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7959 } else
7960 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7961
7962 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7963
7964 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7965
7966 if (ioa_cfg->sis64) {
7967 ipr_cmd->job_step = ipr_reset_next_stage;
7968 return IPR_RC_JOB_CONTINUE;
7969 }
7970
7971 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7972 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7973 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7974 ipr_cmd->done = ipr_reset_ioa_job;
7975 add_timer(&ipr_cmd->timer);
7976 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7977
7978 LEAVE;
7979 return IPR_RC_JOB_RETURN;
7980 }
7981
7982 /**
7983 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7984 * @ipr_cmd: ipr command struct
7985 *
7986 * This function is invoked when an adapter dump has run out
7987 * of processing time.
7988 *
7989 * Return value:
7990 * IPR_RC_JOB_CONTINUE
7991 **/
7992 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7993 {
7994 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7995
7996 if (ioa_cfg->sdt_state == GET_DUMP)
7997 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7998 else if (ioa_cfg->sdt_state == READ_DUMP)
7999 ioa_cfg->sdt_state = ABORT_DUMP;
8000
8001 ioa_cfg->dump_timeout = 1;
8002 ipr_cmd->job_step = ipr_reset_alert;
8003
8004 return IPR_RC_JOB_CONTINUE;
8005 }
8006
8007 /**
8008 * ipr_unit_check_no_data - Log a unit check/no data error log
8009 * @ioa_cfg: ioa config struct
8010 *
8011 * Logs an error indicating the adapter unit checked, but for some
8012 * reason, we were unable to fetch the unit check buffer.
8013 *
8014 * Return value:
8015 * nothing
8016 **/
8017 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8018 {
8019 ioa_cfg->errors_logged++;
8020 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8021 }
8022
8023 /**
8024 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8025 * @ioa_cfg: ioa config struct
8026 *
8027 * Fetches the unit check buffer from the adapter by clocking the data
8028 * through the mailbox register.
8029 *
8030 * Return value:
8031 * nothing
8032 **/
8033 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8034 {
8035 unsigned long mailbox;
8036 struct ipr_hostrcb *hostrcb;
8037 struct ipr_uc_sdt sdt;
8038 int rc, length;
8039 u32 ioasc;
8040
8041 mailbox = readl(ioa_cfg->ioa_mailbox);
8042
8043 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8044 ipr_unit_check_no_data(ioa_cfg);
8045 return;
8046 }
8047
8048 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8049 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8050 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8051
8052 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8053 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8054 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8055 ipr_unit_check_no_data(ioa_cfg);
8056 return;
8057 }
8058
8059 /* Find length of the first sdt entry (UC buffer) */
8060 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8061 length = be32_to_cpu(sdt.entry[0].end_token);
8062 else
8063 length = (be32_to_cpu(sdt.entry[0].end_token) -
8064 be32_to_cpu(sdt.entry[0].start_token)) &
8065 IPR_FMT2_MBX_ADDR_MASK;
8066
8067 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8068 struct ipr_hostrcb, queue);
8069 list_del(&hostrcb->queue);
8070 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8071
8072 rc = ipr_get_ldump_data_section(ioa_cfg,
8073 be32_to_cpu(sdt.entry[0].start_token),
8074 (__be32 *)&hostrcb->hcam,
8075 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8076
8077 if (!rc) {
8078 ipr_handle_log_data(ioa_cfg, hostrcb);
8079 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8080 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8081 ioa_cfg->sdt_state == GET_DUMP)
8082 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8083 } else
8084 ipr_unit_check_no_data(ioa_cfg);
8085
8086 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8087 }
8088
8089 /**
8090 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8091 * @ipr_cmd: ipr command struct
8092 *
8093 * Description: This function fetches the unit check buffer from the adapter.
8094 *
8095 * Return value:
8096 * IPR_RC_JOB_RETURN
8097 **/
8098 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8099 {
8100 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8101
8102 ENTER;
8103 ioa_cfg->ioa_unit_checked = 0;
8104 ipr_get_unit_check_buffer(ioa_cfg);
8105 ipr_cmd->job_step = ipr_reset_alert;
8106 ipr_reset_start_timer(ipr_cmd, 0);
8107
8108 LEAVE;
8109 return IPR_RC_JOB_RETURN;
8110 }
8111
8112 /**
8113 * ipr_reset_restore_cfg_space - Restore PCI config space.
8114 * @ipr_cmd: ipr command struct
8115 *
8116 * Description: This function restores the saved PCI config space of
8117 * the adapter, fails all outstanding ops back to the callers, and
8118 * fetches the dump/unit check if applicable to this reset.
8119 *
8120 * Return value:
8121 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8122 **/
8123 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8124 {
8125 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8126 u32 int_reg;
8127
8128 ENTER;
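/* Mark the saved PCI state valid so pci_restore_state() will restore it */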
8129 ioa_cfg->pdev->state_saved = true;
8130 pci_restore_state(ioa_cfg->pdev);
8131
8132 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8133 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8134 return IPR_RC_JOB_CONTINUE;
8135 }
8136
8137 ipr_fail_all_ops(ioa_cfg);
8138
8139 if (ioa_cfg->sis64) {
8140 /* Set the adapter to the correct endian mode. */
8141 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8142 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8143 }
8144
8145 if (ioa_cfg->ioa_unit_checked) {
8146 if (ioa_cfg->sis64) {
8147 ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8148 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8149 return IPR_RC_JOB_RETURN;
8150 } else {
8151 ioa_cfg->ioa_unit_checked = 0;
8152 ipr_get_unit_check_buffer(ioa_cfg);
8153 ipr_cmd->job_step = ipr_reset_alert;
8154 ipr_reset_start_timer(ipr_cmd, 0);
8155 return IPR_RC_JOB_RETURN;
8156 }
8157 }
8158
8159 if (ioa_cfg->in_ioa_bringdown) {
8160 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8161 } else {
8162 ipr_cmd->job_step = ipr_reset_enable_ioa;
8163
8164 if (GET_DUMP == ioa_cfg->sdt_state) {
8165 ioa_cfg->sdt_state = READ_DUMP;
8166 ioa_cfg->dump_timeout = 0;
8167 if (ioa_cfg->sis64)
8168 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8169 else
8170 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8171 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8172 schedule_work(&ioa_cfg->work_q);
8173 return IPR_RC_JOB_RETURN;
8174 }
8175 }
8176
8177 LEAVE;
8178 return IPR_RC_JOB_CONTINUE;
8179 }
8180
8181 /**
8182 * ipr_reset_bist_done - BIST has completed on the adapter.
8183 * @ipr_cmd: ipr command struct
8184 *
8185 * Description: Unblock config space and resume the reset process.
8186 *
8187 * Return value:
8188 * IPR_RC_JOB_CONTINUE
8189 **/
8190 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8191 {
8192 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8193
8194 ENTER;
8195 if (ioa_cfg->cfg_locked)
8196 pci_cfg_access_unlock(ioa_cfg->pdev);
8197 ioa_cfg->cfg_locked = 0;
8198 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8199 LEAVE;
8200 return IPR_RC_JOB_CONTINUE;
8201 }
8202
8203 /**
8204 * ipr_reset_start_bist - Run BIST on the adapter.
8205 * @ipr_cmd: ipr command struct
8206 *
8207 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8208 *
8209 * Return value:
8210 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8211 **/
8212 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8213 {
8214 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8215 int rc = PCIBIOS_SUCCESSFUL;
8216
8217 ENTER;
8218 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8219 writel(IPR_UPROCI_SIS64_START_BIST,
8220 ioa_cfg->regs.set_uproc_interrupt_reg32);
8221 else
8222 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8223
8224 if (rc == PCIBIOS_SUCCESSFUL) {
8225 ipr_cmd->job_step = ipr_reset_bist_done;
8226 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8227 rc = IPR_RC_JOB_RETURN;
8228 } else {
8229 if (ioa_cfg->cfg_locked)
8230 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8231 ioa_cfg->cfg_locked = 0;
8232 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8233 rc = IPR_RC_JOB_CONTINUE;
8234 }
8235
8236 LEAVE;
8237 return rc;
8238 }
8239
8240 /**
8241 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8242 * @ipr_cmd: ipr command struct
8243 *
8244 * Description: This clears PCI reset to the adapter and delays two seconds.
8245 *
8246 * Return value:
8247 * IPR_RC_JOB_RETURN
8248 **/
8249 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8250 {
8251 ENTER;
8252 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
8253 ipr_cmd->job_step = ipr_reset_bist_done;
8254 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8255 LEAVE;
8256 return IPR_RC_JOB_RETURN;
8257 }
8258
8259 /**
8260 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8261 * @ipr_cmd: ipr command struct
8262 *
8263 * Description: This asserts PCI reset to the adapter.
8264 *
8265 * Return value:
8266 * IPR_RC_JOB_RETURN
8267 **/
8268 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8269 {
8270 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8271 struct pci_dev *pdev = ioa_cfg->pdev;
8272
8273 ENTER;
8274 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8275 ipr_cmd->job_step = ipr_reset_slot_reset_done;
8276 ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
8277 LEAVE;
8278 return IPR_RC_JOB_RETURN;
8279 }
8280
8281 /**
8282 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8283 * @ipr_cmd: ipr command struct
8284 *
8285 * Description: This attempts to block config access to the IOA.
8286 *
8287 * Return value:
8288 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8289 **/
8290 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8291 {
8292 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8293 int rc = IPR_RC_JOB_CONTINUE;
8294
8295 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8296 ioa_cfg->cfg_locked = 1;
8297 ipr_cmd->job_step = ioa_cfg->reset;
8298 } else {
8299 if (ipr_cmd->u.time_left) {
8300 rc = IPR_RC_JOB_RETURN;
8301 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8302 ipr_reset_start_timer(ipr_cmd,
8303 IPR_CHECK_FOR_RESET_TIMEOUT);
8304 } else {
8305 ipr_cmd->job_step = ioa_cfg->reset;
8306 dev_err(&ioa_cfg->pdev->dev,
8307 "Timed out waiting to lock config access. Resetting anyway.\n");
8308 }
8309 }
8310
8311 return rc;
8312 }
8313
8314 /**
8315 * ipr_reset_block_config_access - Block config access to the IOA
8316 * @ipr_cmd: ipr command struct
8317 *
8318 * Description: This attempts to block config access to the IOA.
8319 *
8320 * Return value:
8321 * IPR_RC_JOB_CONTINUE
8322 **/
8323 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8324 {
8325 ipr_cmd->ioa_cfg->cfg_locked = 0;
8326 ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8327 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8328 return IPR_RC_JOB_CONTINUE;
8329 }
8330
8331 /**
8332 * ipr_reset_allowed - Query whether or not IOA can be reset
8333 * @ioa_cfg: ioa config struct
8334 *
8335 * Return value:
8336 * 0 if reset not allowed / non-zero if reset is allowed
8337 **/
8338 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8339 {
8340 volatile u32 temp_reg;
8341
8342 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8343 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8344 }
8345
8346 /**
8347 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8348 * @ipr_cmd: ipr command struct
8349 *
8350 * Description: This function waits for adapter permission to run BIST,
8351 * then runs BIST. If the adapter does not give permission after a
8352 * reasonable time, we will reset the adapter anyway. The impact of
8353 * resetting the adapter without warning the adapter is the risk of
8354 * losing the persistent error log on the adapter. If the adapter is
8355 * reset while it is writing to the flash on the adapter, the flash
8356 * segment will have bad ECC and be zeroed.
8357 *
8358 * Return value:
8359 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8360 **/
8361 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8362 {
8363 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8364 int rc = IPR_RC_JOB_RETURN;
8365
8366 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8367 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8368 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8369 } else {
8370 ipr_cmd->job_step = ipr_reset_block_config_access;
8371 rc = IPR_RC_JOB_CONTINUE;
8372 }
8373
8374 return rc;
8375 }
8376
8377 /**
8378 * ipr_reset_alert - Alert the adapter of a pending reset
8379 * @ipr_cmd: ipr command struct
8380 *
8381 * Description: This function alerts the adapter that it will be reset.
8382 * If memory space is not currently enabled, proceed directly
8383 * to running BIST on the adapter. The timer must always be started
8384 * so we guarantee we do not run BIST from ipr_isr.
8385 *
8386 * Return value:
8387 * IPR_RC_JOB_RETURN
8388 **/
8389 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8390 {
8391 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8392 u16 cmd_reg;
8393 int rc;
8394
8395 ENTER;
8396 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8397
8398 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8399 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8400 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8401 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8402 } else {
8403 ipr_cmd->job_step = ipr_reset_block_config_access;
8404 }
8405
8406 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8407 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8408
8409 LEAVE;
8410 return IPR_RC_JOB_RETURN;
8411 }
8412
8413 /**
8414 * ipr_reset_ucode_download_done - Microcode download completion
8415 * @ipr_cmd: ipr command struct
8416 *
8417 * Description: This function unmaps the microcode download buffer.
8418 *
8419 * Return value:
8420 * IPR_RC_JOB_CONTINUE
8421 **/
8422 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8423 {
8424 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8425 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8426
8427 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
8428 sglist->num_sg, DMA_TO_DEVICE);
8429
8430 ipr_cmd->job_step = ipr_reset_alert;
8431 return IPR_RC_JOB_CONTINUE;
8432 }
8433
8434 /**
8435 * ipr_reset_ucode_download - Download microcode to the adapter
8436 * @ipr_cmd: ipr command struct
8437 *
8438 * Description: This function checks to see if there is microcode
8439 * to download to the adapter. If there is, a download is performed.
8440 *
8441 * Return value:
8442 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8443 **/
8444 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8445 {
8446 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8447 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8448
8449 ENTER;
8450 ipr_cmd->job_step = ipr_reset_alert;
8451
8452 if (!sglist)
8453 return IPR_RC_JOB_CONTINUE;
8454
8455 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8456 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8457 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8458 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
8459 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8460 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8461 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8462
8463 if (ioa_cfg->sis64)
8464 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8465 else
8466 ipr_build_ucode_ioadl(ipr_cmd, sglist);
8467 ipr_cmd->job_step = ipr_reset_ucode_download_done;
8468
8469 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8470 IPR_WRITE_BUFFER_TIMEOUT);
8471
8472 LEAVE;
8473 return IPR_RC_JOB_RETURN;
8474 }
8475
8476 /**
8477 * ipr_reset_shutdown_ioa - Shutdown the adapter
8478 * @ipr_cmd: ipr command struct
8479 *
8480 * Description: This function issues an adapter shutdown of the
8481 * specified type to the specified adapter as part of the
8482 * adapter reset job.
8483 *
8484 * Return value:
8485 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8486 **/
8487 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8488 {
8489 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8490 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8491 unsigned long timeout;
8492 int rc = IPR_RC_JOB_CONTINUE;
8493
8494 ENTER;
8495 if (shutdown_type != IPR_SHUTDOWN_NONE &&
8496 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8497 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8498 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8499 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8500 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8501
8502 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8503 timeout = IPR_SHUTDOWN_TIMEOUT;
8504 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8505 timeout = IPR_INTERNAL_TIMEOUT;
8506 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8507 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
8508 else
8509 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
8510
8511 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8512
8513 rc = IPR_RC_JOB_RETURN;
8514 ipr_cmd->job_step = ipr_reset_ucode_download;
8515 } else
8516 ipr_cmd->job_step = ipr_reset_alert;
8517
8518 LEAVE;
8519 return rc;
8520 }
8521
8522 /**
8523 * ipr_reset_ioa_job - Adapter reset job
8524 * @ipr_cmd: ipr command struct
8525 *
8526 * Description: This function is the job router for the adapter reset job.
8527 *
8528 * Return value:
8529 * none
8530 **/
8531 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8532 {
8533 u32 rc, ioasc;
8534 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8535
8536 do {
8537 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8538
8539 if (ioa_cfg->reset_cmd != ipr_cmd) {
8540 /*
8541 * We are doing nested adapter resets and this is
8542 * not the current reset job.
8543 */
8544 list_add_tail(&ipr_cmd->queue,
8545 &ipr_cmd->hrrq->hrrq_free_q);
8546 return;
8547 }
8548
8549 if (IPR_IOASC_SENSE_KEY(ioasc)) {
8550 rc = ipr_cmd->job_step_failed(ipr_cmd);
8551 if (rc == IPR_RC_JOB_RETURN)
8552 return;
8553 }
8554
8555 ipr_reinit_ipr_cmnd(ipr_cmd);
8556 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
8557 rc = ipr_cmd->job_step(ipr_cmd);
8558 } while (rc == IPR_RC_JOB_CONTINUE);
8559 }
8560
8561 /**
8562 * _ipr_initiate_ioa_reset - Initiate an adapter reset
8563 * @ioa_cfg: ioa config struct
8564 * @job_step: first job step of reset job
8565 * @shutdown_type: shutdown type
8566 *
8567 * Description: This function will initiate the reset of the given adapter
8568 * starting at the selected job step.
8569 * If the caller needs to wait on the completion of the reset,
8570 * the caller must sleep on the reset_wait_q.
8571 *
8572 * Return value:
8573 * none
8574 **/
8575 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8576 int (*job_step) (struct ipr_cmnd *),
8577 enum ipr_shutdown_type shutdown_type)
8578 {
8579 struct ipr_cmnd *ipr_cmd;
8580 int i;
8581
8582 ioa_cfg->in_reset_reload = 1;
8583 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8584 spin_lock(&ioa_cfg->hrrq[i]._lock);
8585 ioa_cfg->hrrq[i].allow_cmds = 0;
8586 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8587 }
8588 wmb();
8589 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
8590 scsi_block_requests(ioa_cfg->host);
8591
8592 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8593 ioa_cfg->reset_cmd = ipr_cmd;
8594 ipr_cmd->job_step = job_step;
8595 ipr_cmd->u.shutdown_type = shutdown_type;
8596
8597 ipr_reset_ioa_job(ipr_cmd);
8598 }
8599
8600 /**
8601 * ipr_initiate_ioa_reset - Initiate an adapter reset
8602 * @ioa_cfg: ioa config struct
8603 * @shutdown_type: shutdown type
8604 *
8605 * Description: This function will initiate the reset of the given adapter.
8606 * If the caller needs to wait on the completion of the reset,
8607 * the caller must sleep on the reset_wait_q.
8608 *
8609 * Return value:
8610 * none
8611 **/
8612 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8613 enum ipr_shutdown_type shutdown_type)
8614 {
8615 int i;
8616
8617 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
8618 return;
8619
8620 if (ioa_cfg->in_reset_reload) {
8621 if (ioa_cfg->sdt_state == GET_DUMP)
8622 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8623 else if (ioa_cfg->sdt_state == READ_DUMP)
8624 ioa_cfg->sdt_state = ABORT_DUMP;
8625 }
8626
8627 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8628 dev_err(&ioa_cfg->pdev->dev,
8629 "IOA taken offline - error recovery failed\n");
8630
8631 ioa_cfg->reset_retries = 0;
8632 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8633 spin_lock(&ioa_cfg->hrrq[i]._lock);
8634 ioa_cfg->hrrq[i].ioa_is_dead = 1;
8635 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8636 }
8637 wmb();
8638
8639 if (ioa_cfg->in_ioa_bringdown) {
8640 ioa_cfg->reset_cmd = NULL;
8641 ioa_cfg->in_reset_reload = 0;
8642 ipr_fail_all_ops(ioa_cfg);
8643 wake_up_all(&ioa_cfg->reset_wait_q);
8644
8645 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8646 spin_unlock_irq(ioa_cfg->host->host_lock);
8647 scsi_unblock_requests(ioa_cfg->host);
8648 spin_lock_irq(ioa_cfg->host->host_lock);
8649 }
8650 return;
8651 } else {
8652 ioa_cfg->in_ioa_bringdown = 1;
8653 shutdown_type = IPR_SHUTDOWN_NONE;
8654 }
8655 }
8656
8657 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8658 shutdown_type);
8659 }
8660
8661 /**
8662 * ipr_reset_freeze - Hold off all I/O activity
8663 * @ipr_cmd: ipr command struct
8664 *
8665 * Description: If the PCI slot is frozen, hold off all I/O
8666 * activity; then, as soon as the slot is available again,
8667 * initiate an adapter reset.
8668 */
8669 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8670 {
8671 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8672 int i;
8673
8674 /* Disallow new interrupts, avoid loop */
8675 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8676 spin_lock(&ioa_cfg->hrrq[i]._lock);
8677 ioa_cfg->hrrq[i].allow_interrupts = 0;
8678 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8679 }
8680 wmb();
8681 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8682 ipr_cmd->done = ipr_reset_ioa_job;
8683 return IPR_RC_JOB_RETURN;
8684 }
8685
8686 /**
8687 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
8688 * @pdev: PCI device struct
8689 *
8690 * Description: This routine is called to tell us that the MMIO
8691 * access to the IOA has been restored
8692 */
8693 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
8694 {
8695 unsigned long flags = 0;
8696 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8697
8698 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8699 if (!ioa_cfg->probe_done)
8700 pci_save_state(pdev);
8701 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8702 return PCI_ERS_RESULT_NEED_RESET;
8703 }
8704
8705 /**
8706 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8707 * @pdev: PCI device struct
8708 *
8709 * Description: This routine is called to tell us that the PCI bus
8710 * is down. Can't do anything here, except put the device driver
8711 * into a holding pattern, waiting for the PCI bus to come back.
8712 */
8713 static void ipr_pci_frozen(struct pci_dev *pdev)
8714 {
8715 unsigned long flags = 0;
8716 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8717
8718 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8719 if (ioa_cfg->probe_done)
8720 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8721 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8722 }
8723
8724 /**
8725 * ipr_pci_slot_reset - Called when PCI slot has been reset.
8726 * @pdev: PCI device struct
8727 *
8728 * Description: This routine is called by the pci error recovery
8729 * code after the PCI slot has been reset, just before we
8730 * should resume normal operations.
8731 */
8732 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8733 {
8734 unsigned long flags = 0;
8735 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8736
8737 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8738 if (ioa_cfg->probe_done) {
8739 if (ioa_cfg->needs_warm_reset)
8740 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8741 else
8742 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8743 IPR_SHUTDOWN_NONE);
8744 } else
8745 wake_up_all(&ioa_cfg->eeh_wait_q);
8746 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8747 return PCI_ERS_RESULT_RECOVERED;
8748 }
8749
8750 /**
8751 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8752 * @pdev: PCI device struct
8753 *
8754 * Description: This routine is called when the PCI bus has
8755 * permanently failed.
8756 */
8757 static void ipr_pci_perm_failure(struct pci_dev *pdev)
8758 {
8759 unsigned long flags = 0;
8760 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8761 int i;
8762
8763 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8764 if (ioa_cfg->probe_done) {
8765 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8766 ioa_cfg->sdt_state = ABORT_DUMP;
8767 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
8768 ioa_cfg->in_ioa_bringdown = 1;
8769 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8770 spin_lock(&ioa_cfg->hrrq[i]._lock);
8771 ioa_cfg->hrrq[i].allow_cmds = 0;
8772 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8773 }
8774 wmb();
8775 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8776 } else
8777 wake_up_all(&ioa_cfg->eeh_wait_q);
8778 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8779 }
8780
8781 /**
8782 * ipr_pci_error_detected - Called when a PCI error is detected.
8783 * @pdev: PCI device struct
8784 * @state: PCI channel state
8785 *
8786 * Description: Called when a PCI error is detected.
8787 *
8788 * Return value:
8789 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
8790 */
8791 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8792 pci_channel_state_t state)
8793 {
8794 switch (state) {
8795 case pci_channel_io_frozen:
8796 ipr_pci_frozen(pdev);
8797 return PCI_ERS_RESULT_CAN_RECOVER;
8798 case pci_channel_io_perm_failure:
8799 ipr_pci_perm_failure(pdev);
8800 return PCI_ERS_RESULT_DISCONNECT;
8801 break;
8802 default:
8803 break;
8804 }
8805 return PCI_ERS_RESULT_NEED_RESET;
8806 }
8807
8808 /**
8809 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8810 * @ioa_cfg: ioa cfg struct
8811 *
8812 * Description: This is the second phase of adapter intialization
8813 * This function takes care of initilizing the adapter to the point
8814 * where it can accept new commands.
8815
8816 * Return value:
8817 * 0 on success / -EIO on failure
8818 **/
8819 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
8820 {
8821 int rc = 0;
8822 unsigned long host_lock_flags = 0;
8823
8824 ENTER;
8825 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8826 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
8827 ioa_cfg->probe_done = 1;
8828 if (ioa_cfg->needs_hard_reset) {
8829 ioa_cfg->needs_hard_reset = 0;
8830 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8831 } else
8832 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8833 IPR_SHUTDOWN_NONE);
8834 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8835 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8836 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8837
8838 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8839 rc = -EIO;
8840 } else if (ipr_invalid_adapter(ioa_cfg)) {
8841 if (!ipr_testmode)
8842 rc = -EIO;
8843
8844 dev_err(&ioa_cfg->pdev->dev,
8845 "Adapter not supported in this hardware configuration.\n");
8846 }
8847
8848 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8849
8850 LEAVE;
8851 return rc;
8852 }
8853
8854 /**
8855 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8856 * @ioa_cfg: ioa config struct
8857 *
8858 * Return value:
8859 * none
8860 **/
8861 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8862 {
8863 int i;
8864
8865 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8866 if (ioa_cfg->ipr_cmnd_list[i])
8867 pci_pool_free(ioa_cfg->ipr_cmd_pool,
8868 ioa_cfg->ipr_cmnd_list[i],
8869 ioa_cfg->ipr_cmnd_list_dma[i]);
8870
8871 ioa_cfg->ipr_cmnd_list[i] = NULL;
8872 }
8873
8874 if (ioa_cfg->ipr_cmd_pool)
8875 pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
8876
8877 kfree(ioa_cfg->ipr_cmnd_list);
8878 kfree(ioa_cfg->ipr_cmnd_list_dma);
8879 ioa_cfg->ipr_cmnd_list = NULL;
8880 ioa_cfg->ipr_cmnd_list_dma = NULL;
8881 ioa_cfg->ipr_cmd_pool = NULL;
8882 }
8883
8884 /**
8885 * ipr_free_mem - Frees memory allocated for an adapter
8886 * @ioa_cfg: ioa cfg struct
8887 *
8888 * Return value:
8889 * nothing
8890 **/
8891 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8892 {
8893 int i;
8894
8895 kfree(ioa_cfg->res_entries);
8896 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
8897 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8898 ipr_free_cmd_blks(ioa_cfg);
8899
8900 for (i = 0; i < ioa_cfg->hrrq_num; i++)
8901 pci_free_consistent(ioa_cfg->pdev,
8902 sizeof(u32) * ioa_cfg->hrrq[i].size,
8903 ioa_cfg->hrrq[i].host_rrq,
8904 ioa_cfg->hrrq[i].host_rrq_dma);
8905
8906 pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
8907 ioa_cfg->u.cfg_table,
8908 ioa_cfg->cfg_table_dma);
8909
8910 for (i = 0; i < IPR_NUM_HCAMS; i++) {
8911 pci_free_consistent(ioa_cfg->pdev,
8912 sizeof(struct ipr_hostrcb),
8913 ioa_cfg->hostrcb[i],
8914 ioa_cfg->hostrcb_dma[i]);
8915 }
8916
8917 ipr_free_dump(ioa_cfg);
8918 kfree(ioa_cfg->trace);
8919 }
8920
8921 /**
8922 * ipr_free_all_resources - Free all allocated resources for an adapter.
8923 * @ioa_cfg: ioa config struct
8924 *
8925 * This function frees all allocated resources for the
8926 * specified adapter.
8927 *
8928 * Return value:
8929 * none
8930 **/
8931 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8932 {
8933 struct pci_dev *pdev = ioa_cfg->pdev;
8934
8935 ENTER;
8936 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
8937 ioa_cfg->intr_flag == IPR_USE_MSIX) {
8938 int i;
8939 for (i = 0; i < ioa_cfg->nvectors; i++)
8940 free_irq(ioa_cfg->vectors_info[i].vec,
8941 &ioa_cfg->hrrq[i]);
8942 } else
8943 free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
8944
8945 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
8946 pci_disable_msi(pdev);
8947 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
8948 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
8949 pci_disable_msix(pdev);
8950 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
8951 }
8952
8953 iounmap(ioa_cfg->hdw_dma_regs);
8954 pci_release_regions(pdev);
8955 ipr_free_mem(ioa_cfg);
8956 scsi_host_put(ioa_cfg->host);
8957 pci_disable_device(pdev);
8958 LEAVE;
8959 }
8960
8961 /**
8962 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8963 * @ioa_cfg: ioa config struct
8964 *
8965 * Return value:
8966 * 0 on success / -ENOMEM on allocation failure
8967 **/
8968 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8969 {
8970 struct ipr_cmnd *ipr_cmd;
8971 struct ipr_ioarcb *ioarcb;
8972 dma_addr_t dma_addr;
8973 int i, entries_each_hrrq, hrrq_id = 0;
8974
8975 ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
8976 sizeof(struct ipr_cmnd), 512, 0);
8977
8978 if (!ioa_cfg->ipr_cmd_pool)
8979 return -ENOMEM;
8980
8981 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
8982 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
8983
8984 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
8985 ipr_free_cmd_blks(ioa_cfg);
8986 return -ENOMEM;
8987 }
8988
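/*
 * Partition the command blocks among the HRRQs: when more than one
 * queue is in use, queue 0 holds only the driver's internal command
 * blocks and the remaining base blocks are divided evenly across the
 * other queues.
 */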
8989 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8990 if (ioa_cfg->hrrq_num > 1) {
8991 if (i == 0) {
8992 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
8993 ioa_cfg->hrrq[i].min_cmd_id = 0;
8994 ioa_cfg->hrrq[i].max_cmd_id =
8995 (entries_each_hrrq - 1);
8996 } else {
8997 entries_each_hrrq =
8998 IPR_NUM_BASE_CMD_BLKS/
8999 (ioa_cfg->hrrq_num - 1);
9000 ioa_cfg->hrrq[i].min_cmd_id =
9001 IPR_NUM_INTERNAL_CMD_BLKS +
9002 (i - 1) * entries_each_hrrq;
9003 ioa_cfg->hrrq[i].max_cmd_id =
9004 (IPR_NUM_INTERNAL_CMD_BLKS +
9005 i * entries_each_hrrq - 1);
9006 }
9007 } else {
9008 entries_each_hrrq = IPR_NUM_CMD_BLKS;
9009 ioa_cfg->hrrq[i].min_cmd_id = 0;
9010 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9011 }
9012 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9013 }
9014
9015 BUG_ON(ioa_cfg->hrrq_num == 0);
9016
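/*
 * Any blocks left over by the integer division above are folded into
 * the last HRRQ so that all IPR_NUM_CMD_BLKS entries are usable.
 */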
9017 i = IPR_NUM_CMD_BLKS -
9018 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9019 if (i > 0) {
9020 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9021 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9022 }
9023
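/*
 * Allocate each command block from the DMA pool and pre-program its
 * IOARCB with the bus addresses of its IOADL and IOASA areas (64-bit
 * layout on SIS-64 adapters, 32-bit otherwise), then park the block on
 * the free queue of its assigned HRRQ.
 */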
9024 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9025 ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
9026
9027 if (!ipr_cmd) {
9028 ipr_free_cmd_blks(ioa_cfg);
9029 return -ENOMEM;
9030 }
9031
9032 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
9033 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9034 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9035
9036 ioarcb = &ipr_cmd->ioarcb;
9037 ipr_cmd->dma_addr = dma_addr;
9038 if (ioa_cfg->sis64)
9039 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9040 else
9041 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9042
9043 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9044 if (ioa_cfg->sis64) {
9045 ioarcb->u.sis64_addr_data.data_ioadl_addr =
9046 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9047 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9048 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9049 } else {
9050 ioarcb->write_ioadl_addr =
9051 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9052 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9053 ioarcb->ioasa_host_pci_addr =
9054 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9055 }
9056 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9057 ipr_cmd->cmd_index = i;
9058 ipr_cmd->ioa_cfg = ioa_cfg;
9059 ipr_cmd->sense_buffer_dma = dma_addr +
9060 offsetof(struct ipr_cmnd, sense_buffer);
9061
9062 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9063 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9064 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9065 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9066 hrrq_id++;
9067 }
9068
9069 return 0;
9070 }
9071
9072 /**
9073 * ipr_alloc_mem - Allocate memory for an adapter
9074 * @ioa_cfg: ioa config struct
9075 *
9076 * Return value:
9077 * 0 on success / non-zero for error
9078 **/
9079 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9080 {
9081 struct pci_dev *pdev = ioa_cfg->pdev;
9082 int i, rc = -ENOMEM;
9083
9084 ENTER;
9085 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
9086 ioa_cfg->max_devs_supported, GFP_KERNEL);
9087
9088 if (!ioa_cfg->res_entries)
9089 goto out;
9090
9091 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9092 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9093 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9094 }
9095
9096 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
9097 sizeof(struct ipr_misc_cbs),
9098 &ioa_cfg->vpd_cbs_dma);
9099
9100 if (!ioa_cfg->vpd_cbs)
9101 goto out_free_res_entries;
9102
9103 if (ipr_alloc_cmd_blks(ioa_cfg))
9104 goto out_free_vpd_cbs;
9105
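/*
 * Each HRRQ gets its own DMA-coherent array of 32-bit response entries,
 * sized to match the number of command blocks assigned to that queue.
 */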
9106 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9107 ioa_cfg->hrrq[i].host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
9108 sizeof(u32) * ioa_cfg->hrrq[i].size,
9109 &ioa_cfg->hrrq[i].host_rrq_dma);
9110
9111 if (!ioa_cfg->hrrq[i].host_rrq) {
9112 while (--i >= 0)
9113 pci_free_consistent(pdev,
9114 sizeof(u32) * ioa_cfg->hrrq[i].size,
9115 ioa_cfg->hrrq[i].host_rrq,
9116 ioa_cfg->hrrq[i].host_rrq_dma);
9117 goto out_ipr_free_cmd_blocks;
9118 }
9119 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9120 }
9121
9122 ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
9123 ioa_cfg->cfg_table_size,
9124 &ioa_cfg->cfg_table_dma);
9125
9126 if (!ioa_cfg->u.cfg_table)
9127 goto out_free_host_rrq;
9128
9129 for (i = 0; i < IPR_NUM_HCAMS; i++) {
9130 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
9131 sizeof(struct ipr_hostrcb),
9132 &ioa_cfg->hostrcb_dma[i]);
9133
9134 if (!ioa_cfg->hostrcb[i])
9135 goto out_free_hostrcb_dma;
9136
9137 ioa_cfg->hostrcb[i]->hostrcb_dma =
9138 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9139 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9140 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9141 }
9142
9143 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
9144 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9145
9146 if (!ioa_cfg->trace)
9147 goto out_free_hostrcb_dma;
9148
9149 rc = 0;
9150 out:
9151 LEAVE;
9152 return rc;
9153
9154 out_free_hostrcb_dma:
9155 while (i-- > 0) {
9156 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
9157 ioa_cfg->hostrcb[i],
9158 ioa_cfg->hostrcb_dma[i]);
9159 }
9160 pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
9161 ioa_cfg->u.cfg_table,
9162 ioa_cfg->cfg_table_dma);
9163 out_free_host_rrq:
9164 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9165 pci_free_consistent(pdev,
9166 sizeof(u32) * ioa_cfg->hrrq[i].size,
9167 ioa_cfg->hrrq[i].host_rrq,
9168 ioa_cfg->hrrq[i].host_rrq_dma);
9169 }
9170 out_ipr_free_cmd_blocks:
9171 ipr_free_cmd_blks(ioa_cfg);
9172 out_free_vpd_cbs:
9173 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
9174 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9175 out_free_res_entries:
9176 kfree(ioa_cfg->res_entries);
9177 goto out;
9178 }
9179
9180 /**
9181 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9182 * @ioa_cfg: ioa config struct
9183 *
9184 * Return value:
9185 * none
9186 **/
9187 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9188 {
9189 int i;
9190
9191 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9192 ioa_cfg->bus_attr[i].bus = i;
9193 ioa_cfg->bus_attr[i].qas_enabled = 0;
9194 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9195 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9196 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9197 else
9198 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9199 }
9200 }
9201
9202 /**
9203 * ipr_init_regs - Initialize IOA registers
9204 * @ioa_cfg: ioa config struct
9205 *
9206 * Return value:
9207 * none
9208 **/
9209 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9210 {
9211 const struct ipr_interrupt_offsets *p;
9212 struct ipr_interrupts *t;
9213 void __iomem *base;
9214
9215 p = &ioa_cfg->chip_cfg->regs;
9216 t = &ioa_cfg->regs;
9217 base = ioa_cfg->hdw_dma_regs;
9218
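/*
 * Translate the chip-specific register offsets into ioremapped
 * addresses relative to the adapter's MMIO region.
 */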
9219 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9220 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9221 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9222 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9223 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9224 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9225 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9226 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9227 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9228 t->ioarrin_reg = base + p->ioarrin_reg;
9229 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9230 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9231 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9232 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9233 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9234 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9235
9236 if (ioa_cfg->sis64) {
9237 t->init_feedback_reg = base + p->init_feedback_reg;
9238 t->dump_addr_reg = base + p->dump_addr_reg;
9239 t->dump_data_reg = base + p->dump_data_reg;
9240 t->endian_swap_reg = base + p->endian_swap_reg;
9241 }
9242 }
9243
9244 /**
9245 * ipr_init_ioa_cfg - Initialize IOA config struct
9246 * @ioa_cfg: ioa config struct
9247 * @host: scsi host struct
9248 * @pdev: PCI dev struct
9249 *
9250 * Return value:
9251 * none
9252 **/
9253 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9254 struct Scsi_Host *host, struct pci_dev *pdev)
9255 {
9256 int i;
9257
9258 ioa_cfg->host = host;
9259 ioa_cfg->pdev = pdev;
9260 ioa_cfg->log_level = ipr_log_level;
9261 ioa_cfg->doorbell = IPR_DOORBELL;
9262 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9263 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9264 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9265 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9266 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9267 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9268
9269 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9270 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9271 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9272 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9273 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9274 init_waitqueue_head(&ioa_cfg->reset_wait_q);
9275 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9276 init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9277 ioa_cfg->sdt_state = INACTIVE;
9278
9279 ipr_initialize_bus_attr(ioa_cfg);
9280 ioa_cfg->max_devs_supported = ipr_max_devs;
9281
9282 if (ioa_cfg->sis64) {
9283 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9284 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9285 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9286 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9287 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9288 + ((sizeof(struct ipr_config_table_entry64)
9289 * ioa_cfg->max_devs_supported)));
9290 } else {
9291 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9292 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9293 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9294 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9295 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9296 + ((sizeof(struct ipr_config_table_entry)
9297 * ioa_cfg->max_devs_supported)));
9298 }
9299
9300 host->max_channel = IPR_MAX_BUS_TO_SCAN;
9301 host->unique_id = host->host_no;
9302 host->max_cmd_len = IPR_MAX_CDB_LEN;
9303 host->can_queue = ioa_cfg->max_cmds;
9304 pci_set_drvdata(pdev, ioa_cfg);
9305
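/*
 * HRRQ 0 shares the SCSI host lock; any additional queues use their
 * own per-queue lock.
 */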
9306 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9307 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9308 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9309 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9310 if (i == 0)
9311 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9312 else
9313 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9314 }
9315 }
9316
9317 /**
9318 * ipr_get_chip_info - Find adapter chip information
9319 * @dev_id: PCI device id struct
9320 *
9321 * Return value:
9322 * ptr to chip information on success / NULL on failure
9323 **/
9324 static const struct ipr_chip_t *
9325 ipr_get_chip_info(const struct pci_device_id *dev_id)
9326 {
9327 int i;
9328
9329 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9330 if (ipr_chip[i].vendor == dev_id->vendor &&
9331 ipr_chip[i].device == dev_id->device)
9332 return &ipr_chip[i];
9333 return NULL;
9334 }
9335
9336 /**
9337 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9338 * during probe time
9339 * @ioa_cfg: ioa config struct
9340 *
9341 * Return value:
9342 * None
9343 **/
9344 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9345 {
9346 struct pci_dev *pdev = ioa_cfg->pdev;
9347
9348 if (pci_channel_offline(pdev)) {
9349 wait_event_timeout(ioa_cfg->eeh_wait_q,
9350 !pci_channel_offline(pdev),
9351 IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9352 pci_restore_state(pdev);
9353 }
9354 }
9355
9356 static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9357 {
9358 struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
9359 int i, vectors;
9360
9361 for (i = 0; i < ARRAY_SIZE(entries); ++i)
9362 entries[i].entry = i;
9363
9364 vectors = pci_enable_msix_range(ioa_cfg->pdev,
9365 entries, 1, ipr_number_of_msix);
9366 if (vectors < 0) {
9367 ipr_wait_for_pci_err_recovery(ioa_cfg);
9368 return vectors;
9369 }
9370
9371 for (i = 0; i < vectors; i++)
9372 ioa_cfg->vectors_info[i].vec = entries[i].vector;
9373 ioa_cfg->nvectors = vectors;
9374
9375 return 0;
9376 }
9377
9378 static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9379 {
9380 int i, vectors;
9381
9382 vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
9383 if (vectors < 0) {
9384 ipr_wait_for_pci_err_recovery(ioa_cfg);
9385 return vectors;
9386 }
9387
9388 for (i = 0; i < vectors; i++)
9389 ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9390 ioa_cfg->nvectors = vectors;
9391
9392 return 0;
9393 }
9394
9395 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9396 {
9397 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9398
9399 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9400 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9401 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9402 ioa_cfg->vectors_info[vec_idx].
9403 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9404 }
9405 }
9406
9407 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
9408 {
9409 int i, rc;
9410
9411 for (i = 1; i < ioa_cfg->nvectors; i++) {
9412 rc = request_irq(ioa_cfg->vectors_info[i].vec,
9413 ipr_isr_mhrrq,
9414 0,
9415 ioa_cfg->vectors_info[i].desc,
9416 &ioa_cfg->hrrq[i]);
9417 if (rc) {
9418 while (--i >= 0)
9419 free_irq(ioa_cfg->vectors_info[i].vec,
9420 &ioa_cfg->hrrq[i]);
9421 return rc;
9422 }
9423 }
9424 return 0;
9425 }
9426
9427 /**
9428 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9429 * @irq: interrupt number
* @devp: pointer to the ioa config struct
9430 *
9431 * Description: Simply set the msi_received flag to 1 indicating that
9432 * Message Signaled Interrupts are supported.
9433 *
9434 * Return value:
9435 * IRQ_HANDLED
9436 **/
9437 static irqreturn_t ipr_test_intr(int irq, void *devp)
9438 {
9439 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9440 unsigned long lock_flags = 0;
9441 irqreturn_t rc = IRQ_HANDLED;
9442
9443 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
9444 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9445
9446 ioa_cfg->msi_received = 1;
9447 wake_up(&ioa_cfg->msi_wait_q);
9448
9449 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9450 return rc;
9451 }
9452
9453 /**
9454 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9455 * @ioa_cfg: ioa config struct
* @pdev: PCI device struct
9456 *
9457 * Description: The return value from pci_enable_msi_range() cannot always be
9458 * trusted. This routine sets up and initiates a test interrupt to determine
9459 * if the interrupt is received via the ipr_test_intr() service routine.
9460 * If the test fails, the driver will fall back to LSI.
9461 *
9462 * Return value:
9463 * 0 on success / non-zero on failure
9464 **/
9465 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
9466 {
9467 int rc;
9468 volatile u32 int_reg;
9469 unsigned long lock_flags = 0;
9470
9471 ENTER;
9472
9473 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9474 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9475 ioa_cfg->msi_received = 0;
9476 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9477 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
9478 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9479 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9480
9481 if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9482 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9483 else
9484 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9485 if (rc) {
9486 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
9487 return rc;
9488 } else if (ipr_debug)
9489 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
9490
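/*
 * Trigger a test interrupt through the IO debug acknowledge bit and
 * wait up to one second for ipr_test_intr() to report that it was
 * delivered; otherwise fall back to LSI.
 */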
9491 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
9492 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
9493 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
9494 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9495 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9496
9497 if (!ioa_cfg->msi_received) {
9498 /* MSI test failed */
9499 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
9500 rc = -EOPNOTSUPP;
9501 } else if (ipr_debug)
9502 dev_info(&pdev->dev, "MSI test succeeded.\n");
9503
9504 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9505
9506 if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9507 free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
9508 else
9509 free_irq(pdev->irq, ioa_cfg);
9510
9511 LEAVE;
9512
9513 return rc;
9514 }
9515
9516 /**
* ipr_probe_ioa - Allocates memory and does first stage of initialization
9517 * @pdev: PCI device struct
9518 * @dev_id: PCI device id struct
9519 *
9520 * Return value:
9521 * 0 on success / non-zero on failure
9522 **/
9523 static int ipr_probe_ioa(struct pci_dev *pdev,
9524 const struct pci_device_id *dev_id)
9525 {
9526 struct ipr_ioa_cfg *ioa_cfg;
9527 struct Scsi_Host *host;
9528 unsigned long ipr_regs_pci;
9529 void __iomem *ipr_regs;
9530 int rc = PCIBIOS_SUCCESSFUL;
9531 volatile u32 mask, uproc, interrupts;
9532 unsigned long lock_flags, driver_lock_flags;
9533
9534 ENTER;
9535
9536 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
9537 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9538
9539 if (!host) {
9540 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9541 rc = -ENOMEM;
9542 goto out;
9543 }
9544
9545 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9546 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
9547 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
9548
9549 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
9550
9551 if (!ioa_cfg->ipr_chip) {
9552 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
9553 dev_id->vendor, dev_id->device);
9554 rc = -ENODEV;
goto out_scsi_host_put;
9555 }
9556
9557 /* set SIS 32 or SIS 64 */
9558 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
9559 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
9560 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
9561 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
9562
9563 if (ipr_transop_timeout)
9564 ioa_cfg->transop_timeout = ipr_transop_timeout;
9565 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
9566 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
9567 else
9568 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
9569
9570 ioa_cfg->revid = pdev->revision;
9571
9572 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
9573
9574 ipr_regs_pci = pci_resource_start(pdev, 0);
9575
9576 rc = pci_request_regions(pdev, IPR_NAME);
9577 if (rc < 0) {
9578 dev_err(&pdev->dev,
9579 "Couldn't register memory range of registers\n");
9580 goto out_scsi_host_put;
9581 }
9582
9583 rc = pci_enable_device(pdev);
9584
9585 if (rc || pci_channel_offline(pdev)) {
9586 if (pci_channel_offline(pdev)) {
9587 ipr_wait_for_pci_err_recovery(ioa_cfg);
9588 rc = pci_enable_device(pdev);
9589 }
9590
9591 if (rc) {
9592 dev_err(&pdev->dev, "Cannot enable adapter\n");
9593 ipr_wait_for_pci_err_recovery(ioa_cfg);
9594 goto out_release_regions;
9595 }
9596 }
9597
9598 ipr_regs = pci_ioremap_bar(pdev, 0);
9599
9600 if (!ipr_regs) {
9601 dev_err(&pdev->dev,
9602 "Couldn't map memory range of registers\n");
9603 rc = -ENOMEM;
9604 goto out_disable;
9605 }
9606
9607 ioa_cfg->hdw_dma_regs = ipr_regs;
9608 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9609 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9610
9611 ipr_init_regs(ioa_cfg);
9612
9613 if (ioa_cfg->sis64) {
9614 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
9615 if (rc < 0) {
9616 dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
9617 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9618 }
9619 } else
9620 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9621
9622 if (rc < 0) {
9623 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
9624 goto cleanup_nomem;
9625 }
9626
9627 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
9628 ioa_cfg->chip_cfg->cache_line_size);
9629
9630 if (rc != PCIBIOS_SUCCESSFUL) {
9631 dev_err(&pdev->dev, "Write of cache line size failed\n");
9632 ipr_wait_for_pci_err_recovery(ioa_cfg);
9633 rc = -EIO;
9634 goto cleanup_nomem;
9635 }
9636
9637 /* Issue MMIO read to ensure card is not in EEH */
9638 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
9639 ipr_wait_for_pci_err_recovery(ioa_cfg);
9640
9641 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
9642 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
9643 IPR_MAX_MSIX_VECTORS);
9644 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
9645 }
9646
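/*
 * Interrupt mode selection: try MSI-X first, then plain MSI, and fall
 * back to a single legacy (LSI) interrupt if neither can be enabled.
 */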
9647 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9648 ipr_enable_msix(ioa_cfg) == 0)
9649 ioa_cfg->intr_flag = IPR_USE_MSIX;
9650 else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9651 ipr_enable_msi(ioa_cfg) == 0)
9652 ioa_cfg->intr_flag = IPR_USE_MSI;
9653 else {
9654 ioa_cfg->intr_flag = IPR_USE_LSI;
9655 ioa_cfg->nvectors = 1;
9656 dev_info(&pdev->dev, "Cannot enable MSI.\n");
9657 }
9658
9659 pci_set_master(pdev);
9660
9661 if (pci_channel_offline(pdev)) {
9662 ipr_wait_for_pci_err_recovery(ioa_cfg);
9663 pci_set_master(pdev);
9664 if (pci_channel_offline(pdev)) {
9665 rc = -EIO;
9666 goto out_msi_disable;
9667 }
9668 }
9669
9670 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9671 ioa_cfg->intr_flag == IPR_USE_MSIX) {
9672 rc = ipr_test_msi(ioa_cfg, pdev);
9673 if (rc == -EOPNOTSUPP) {
9674 ipr_wait_for_pci_err_recovery(ioa_cfg);
9675 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9676 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9677 pci_disable_msi(pdev);
9678 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9679 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9680 pci_disable_msix(pdev);
9681 }
9682
9683 ioa_cfg->intr_flag = IPR_USE_LSI;
9684 ioa_cfg->nvectors = 1;
9685 }
9686 else if (rc)
9687 goto out_msi_disable;
9688 else {
9689 if (ioa_cfg->intr_flag == IPR_USE_MSI)
9690 dev_info(&pdev->dev,
9691 "Request for %d MSIs succeeded with starting IRQ: %d\n",
9692 ioa_cfg->nvectors, pdev->irq);
9693 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9694 dev_info(&pdev->dev,
9695 "Request for %d MSIXs succeeded.",
9696 ioa_cfg->nvectors);
9697 }
9698 }
9699
9700 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
9701 (unsigned int)num_online_cpus(),
9702 (unsigned int)IPR_MAX_HRRQ_NUM);
9703
9704 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
9705 goto out_msi_disable;
9706
9707 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
9708 goto out_msi_disable;
9709
9710 rc = ipr_alloc_mem(ioa_cfg);
9711 if (rc < 0) {
9712 dev_err(&pdev->dev,
9713 "Couldn't allocate enough memory for device driver!\n");
9714 goto out_msi_disable;
9715 }
9716
9717 /* Save away PCI config space for use following IOA reset */
9718 rc = pci_save_state(pdev);
9719
9720 if (rc != PCIBIOS_SUCCESSFUL) {
9721 dev_err(&pdev->dev, "Failed to save PCI config space\n");
9722 rc = -EIO;
9723 goto cleanup_nolog;
9724 }
9725
9726 /*
9727 * If HRRQ updated interrupt is not masked, or reset alert is set,
9728 * the card is in an unknown state and needs a hard reset
9729 */
9730 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
9731 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
9732 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
9733 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
9734 ioa_cfg->needs_hard_reset = 1;
9735 if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
9736 ioa_cfg->needs_hard_reset = 1;
9737 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
9738 ioa_cfg->ioa_unit_checked = 1;
9739
9740 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9741 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9742 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9743
9744 if (ioa_cfg->intr_flag == IPR_USE_MSI
9745 || ioa_cfg->intr_flag == IPR_USE_MSIX) {
9746 name_msi_vectors(ioa_cfg);
9747 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
9748 0,
9749 ioa_cfg->vectors_info[0].desc,
9750 &ioa_cfg->hrrq[0]);
9751 if (!rc)
9752 rc = ipr_request_other_msi_irqs(ioa_cfg);
9753 } else {
9754 rc = request_irq(pdev->irq, ipr_isr,
9755 IRQF_SHARED,
9756 IPR_NAME, &ioa_cfg->hrrq[0]);
9757 }
9758 if (rc) {
9759 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
9760 pdev->irq, rc);
9761 goto cleanup_nolog;
9762 }
9763
9764 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
9765 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
9766 ioa_cfg->needs_warm_reset = 1;
9767 ioa_cfg->reset = ipr_reset_slot_reset;
9768 } else
9769 ioa_cfg->reset = ipr_reset_start_bist;
9770
9771 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
9772 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
9773 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
9774
9775 LEAVE;
9776 out:
9777 return rc;
9778
9779 cleanup_nolog:
9780 ipr_free_mem(ioa_cfg);
9781 out_msi_disable:
9782 ipr_wait_for_pci_err_recovery(ioa_cfg);
9783 if (ioa_cfg->intr_flag == IPR_USE_MSI)
9784 pci_disable_msi(pdev);
9785 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9786 pci_disable_msix(pdev);
9787 cleanup_nomem:
9788 iounmap(ipr_regs);
9789 out_disable:
9790 pci_disable_device(pdev);
9791 out_release_regions:
9792 pci_release_regions(pdev);
9793 out_scsi_host_put:
9794 scsi_host_put(host);
9795 goto out;
9796 }
9797
9798 /**
9799 * ipr_scan_vsets - Scans for VSET devices
9800 * @ioa_cfg: ioa config struct
9801 *
9802 * Description: Since VSET resources do not follow SAM (sparse LUNs may exist
9803 * with no LUN 0), the driver must scan for them itself.
9804 *
9805 * Return value:
9806 * none
9807 **/
9808 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
9809 {
9810 int target, lun;
9811
9812 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
9813 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
9814 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
9815 }
9816
9817 /**
9818 * ipr_initiate_ioa_bringdown - Bring down an adapter
9819 * @ioa_cfg: ioa config struct
9820 * @shutdown_type: shutdown type
9821 *
9822 * Description: This function will initiate bringing down the adapter.
9823 * This consists of issuing an IOA shutdown to the adapter
9824 * to flush the cache, and running BIST.
9825 * If the caller needs to wait on the completion of the reset,
9826 * the caller must sleep on the reset_wait_q.
9827 *
9828 * Return value:
9829 * none
9830 **/
9831 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
9832 enum ipr_shutdown_type shutdown_type)
9833 {
9834 ENTER;
9835 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9836 ioa_cfg->sdt_state = ABORT_DUMP;
9837 ioa_cfg->reset_retries = 0;
9838 ioa_cfg->in_ioa_bringdown = 1;
9839 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
9840 LEAVE;
9841 }
9842
9843 /**
9844 * __ipr_remove - Remove a single adapter
9845 * @pdev: pci device struct
9846 *
9847 * Adapter hot plug remove entry point.
9848 *
9849 * Return value:
9850 * none
9851 **/
9852 static void __ipr_remove(struct pci_dev *pdev)
9853 {
9854 unsigned long host_lock_flags = 0;
9855 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9856 int i;
9857 unsigned long driver_lock_flags;
9858 ENTER;
9859
9860 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9861 while (ioa_cfg->in_reset_reload) {
9862 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9863 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9864 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9865 }
9866
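/*
 * Flag every HRRQ as being removed so no new commands are accepted,
 * and make the flag visible before the bringdown is initiated.
 */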
9867 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9868 spin_lock(&ioa_cfg->hrrq[i]._lock);
9869 ioa_cfg->hrrq[i].removing_ioa = 1;
9870 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9871 }
9872 wmb();
9873 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9874
9875 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9876 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9877 flush_work(&ioa_cfg->work_q);
9878 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9879 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9880
9881 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
9882 list_del(&ioa_cfg->queue);
9883 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
9884
9885 if (ioa_cfg->sdt_state == ABORT_DUMP)
9886 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9887 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9888
9889 ipr_free_all_resources(ioa_cfg);
9890
9891 LEAVE;
9892 }
9893
9894 /**
9895 * ipr_remove - IOA hot plug remove entry point
9896 * @pdev: pci device struct
9897 *
9898 * Adapter hot plug remove entry point.
9899 *
9900 * Return value:
9901 * none
9902 **/
9903 static void ipr_remove(struct pci_dev *pdev)
9904 {
9905 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9906
9907 ENTER;
9908
9909 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9910 &ipr_trace_attr);
9911 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
9912 &ipr_dump_attr);
9913 scsi_remove_host(ioa_cfg->host);
9914
9915 __ipr_remove(pdev);
9916
9917 LEAVE;
9918 }
9919
9920 /**
9921 * ipr_probe - Adapter hot plug add entry point
9922 * @pdev: PCI device struct
* @dev_id: PCI device id struct
*
9923 * Return value:
9924 * 0 on success / non-zero on failure
9925 **/
9926 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
9927 {
9928 struct ipr_ioa_cfg *ioa_cfg;
9929 int rc, i;
9930
9931 rc = ipr_probe_ioa(pdev, dev_id);
9932
9933 if (rc)
9934 return rc;
9935
9936 ioa_cfg = pci_get_drvdata(pdev);
9937 rc = ipr_probe_ioa_part2(ioa_cfg);
9938
9939 if (rc) {
9940 __ipr_remove(pdev);
9941 return rc;
9942 }
9943
9944 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
9945
9946 if (rc) {
9947 __ipr_remove(pdev);
9948 return rc;
9949 }
9950
9951 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
9952 &ipr_trace_attr);
9953
9954 if (rc) {
9955 scsi_remove_host(ioa_cfg->host);
9956 __ipr_remove(pdev);
9957 return rc;
9958 }
9959
9960 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
9961 &ipr_dump_attr);
9962
9963 if (rc) {
9964 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9965 &ipr_trace_attr);
9966 scsi_remove_host(ioa_cfg->host);
9967 __ipr_remove(pdev);
9968 return rc;
9969 }
9970
9971 scsi_scan_host(ioa_cfg->host);
9972 ipr_scan_vsets(ioa_cfg);
9973 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
9974 ioa_cfg->allow_ml_add_del = 1;
9975 ioa_cfg->host->max_channel = IPR_VSET_BUS;
9976 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
9977
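/*
 * Block layer interrupt polling (blk_iopoll) is only set up on SIS-64
 * adapters running with multiple vectors; HRRQ 0 keeps conventional
 * interrupt handling.
 */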
9978 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
9979 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
9980 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
9981 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
9982 ioa_cfg->iopoll_weight, ipr_iopoll);
9983 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
9984 }
9985 }
9986
9987 schedule_work(&ioa_cfg->work_q);
9988 return 0;
9989 }
9990
9991 /**
9992 * ipr_shutdown - Shutdown handler.
9993 * @pdev: pci device struct
9994 *
9995 * This function is invoked upon system shutdown/reboot. It will issue
9996 * an adapter shutdown to the adapter to flush the write cache.
9997 *
9998 * Return value:
9999 * none
10000 **/
10001 static void ipr_shutdown(struct pci_dev *pdev)
10002 {
10003 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10004 unsigned long lock_flags = 0;
10005 int i;
10006
10007 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10008 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
10009 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10010 ioa_cfg->iopoll_weight = 0;
10011 for (i = 1; i < ioa_cfg->hrrq_num; i++)
10012 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
10013 }
10014
10015 while (ioa_cfg->in_reset_reload) {
10016 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10017 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10018 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10019 }
10020
10021 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10022 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10023 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10024 }
10025
10026 static struct pci_device_id ipr_pci_table[] = {
10027 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10028 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
10029 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10030 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
10031 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10032 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
10033 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10034 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
10035 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10036 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
10037 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10038 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
10039 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10040 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
10041 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10042 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10043 IPR_USE_LONG_TRANSOP_TIMEOUT },
10044 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10045 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10046 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10047 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10048 IPR_USE_LONG_TRANSOP_TIMEOUT },
10049 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10050 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10051 IPR_USE_LONG_TRANSOP_TIMEOUT },
10052 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10053 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10054 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10055 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10056 IPR_USE_LONG_TRANSOP_TIMEOUT},
10057 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10058 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10059 IPR_USE_LONG_TRANSOP_TIMEOUT },
10060 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10061 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10062 IPR_USE_LONG_TRANSOP_TIMEOUT },
10063 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10064 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10065 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10066 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10067 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10068 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
10069 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
10070 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
10071 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
10072 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10073 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
10074 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10075 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10076 IPR_USE_LONG_TRANSOP_TIMEOUT },
10077 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10078 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10079 IPR_USE_LONG_TRANSOP_TIMEOUT },
10080 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10081 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10082 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10083 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10084 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10085 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
10086 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10087 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10088 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10089 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10090 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10091 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
10092 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10093 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
10094 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10095 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
10096 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10097 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
10098 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10099 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10100 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10101 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
10102 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10103 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10104 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10105 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10106 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10107 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10108 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10109 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
10110 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10111 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10112 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10113 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10114 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10115 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10116 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10117 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10118 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10119 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10120 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10121 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10122 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10123 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10124 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10125 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
10126 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10127 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10128 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10129 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10130 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10131 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
10132 { }
10133 };
10134 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
10135
10136 static const struct pci_error_handlers ipr_err_handler = {
10137 .error_detected = ipr_pci_error_detected,
10138 .mmio_enabled = ipr_pci_mmio_enabled,
10139 .slot_reset = ipr_pci_slot_reset,
10140 };
10141
10142 static struct pci_driver ipr_driver = {
10143 .name = IPR_NAME,
10144 .id_table = ipr_pci_table,
10145 .probe = ipr_probe,
10146 .remove = ipr_remove,
10147 .shutdown = ipr_shutdown,
10148 .err_handler = &ipr_err_handler,
10149 };
10150
10151 /**
10152 * ipr_halt_done - Shutdown prepare completion
10153 * @ipr_cmd: ipr command struct
*
10154 * Return value:
10155 * none
10156 **/
10157 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10158 {
10159 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10160 }
10161
10162 /**
10163 * ipr_halt - Issue shutdown prepare to all adapters
10164 *
10165 * Return value:
10166 * NOTIFY_OK for handled shutdown events / NOTIFY_DONE otherwise
10167 **/
10168 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10169 {
10170 struct ipr_cmnd *ipr_cmd;
10171 struct ipr_ioa_cfg *ioa_cfg;
10172 unsigned long flags = 0, driver_lock_flags;
10173
10174 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10175 return NOTIFY_DONE;
10176
10177 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10178
10179 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10180 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10181 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
10182 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10183 continue;
10184 }
10185
10186 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10187 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10188 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10189 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10190 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10191
10192 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10193 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10194 }
10195 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10196
10197 return NOTIFY_OK;
10198 }
10199
10200 static struct notifier_block ipr_notifier = {
10201 ipr_halt, NULL, 0
10202 };
10203
10204 /**
10205 * ipr_init - Module entry point
10206 *
10207 * Return value:
10208 * 0 on success / negative value on failure
10209 **/
10210 static int __init ipr_init(void)
10211 {
10212 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10213 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10214
10215 register_reboot_notifier(&ipr_notifier);
10216 return pci_register_driver(&ipr_driver);
10217 }
10218
10219 /**
10220 * ipr_exit - Module unload
10221 *
10222 * Module unload entry point.
10223 *
10224 * Return value:
10225 * none
10226 **/
10227 static void __exit ipr_exit(void)
10228 {
10229 unregister_reboot_notifier(&ipr_notifier);
10230 pci_unregister_driver(&ipr_driver);
10231 }
10232
10233 module_init(ipr_init);
10234 module_exit(ipr_exit);