[SCSI] ipr: Properly handle IOA recovered errors
drivers/scsi/ipr.c
1/*
2 * ipr.c -- driver for IBM Power Linux RAID adapters
3 *
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5 *
6 * Copyright (C) 2003, 2004 IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24/*
25 * Notes:
26 *
27 * This driver is used to control the following SCSI adapters:
28 *
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30 *
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
35 *
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
43 * - Hot spare
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
47 * by adding disks
48 *
49 * Driver Features:
50 * - Tagged command queuing
51 * - Adapter microcode download
52 * - PCI hot plug
53 * - SCSI device hot plug
54 *
55 */
56
57#include <linux/fs.h>
58#include <linux/init.h>
59#include <linux/types.h>
60#include <linux/errno.h>
61#include <linux/kernel.h>
62#include <linux/ioport.h>
63#include <linux/delay.h>
64#include <linux/pci.h>
65#include <linux/wait.h>
66#include <linux/spinlock.h>
67#include <linux/sched.h>
68#include <linux/interrupt.h>
69#include <linux/blkdev.h>
70#include <linux/firmware.h>
71#include <linux/module.h>
72#include <linux/moduleparam.h>
73#include <asm/io.h>
74#include <asm/irq.h>
75#include <asm/processor.h>
76#include <scsi/scsi.h>
77#include <scsi/scsi_host.h>
78#include <scsi/scsi_tcq.h>
79#include <scsi/scsi_eh.h>
80#include <scsi/scsi_cmnd.h>
81#include "ipr.h"
82
83/*
84 * Global Data
85 */
86static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
87static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
88static unsigned int ipr_max_speed = 1;
89static int ipr_testmode = 0;
90static unsigned int ipr_fastfail = 0;
91static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT;
 92static unsigned int ipr_enable_cache = 1;
 93static unsigned int ipr_debug = 0;
 94static int ipr_auto_create = 1;
 95static DEFINE_SPINLOCK(ipr_driver_lock);
 96
 97/* This table describes the differences between DMA controller chips */
 98static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
 99 { /* Gemstone, Citrine, and Obsidian */
100 .mailbox = 0x0042C,
101 .cache_line_size = 0x20,
102 {
103 .set_interrupt_mask_reg = 0x0022C,
104 .clr_interrupt_mask_reg = 0x00230,
105 .sense_interrupt_mask_reg = 0x0022C,
106 .clr_interrupt_reg = 0x00228,
107 .sense_interrupt_reg = 0x00224,
108 .ioarrin_reg = 0x00404,
109 .sense_uproc_interrupt_reg = 0x00214,
110 .set_uproc_interrupt_reg = 0x00214,
111 .clr_uproc_interrupt_reg = 0x00218
112 }
113 },
114 { /* Snipe and Scamp */
115 .mailbox = 0x0052C,
116 .cache_line_size = 0x20,
117 {
118 .set_interrupt_mask_reg = 0x00288,
119 .clr_interrupt_mask_reg = 0x0028C,
120 .sense_interrupt_mask_reg = 0x00288,
121 .clr_interrupt_reg = 0x00284,
122 .sense_interrupt_reg = 0x00280,
123 .ioarrin_reg = 0x00504,
124 .sense_uproc_interrupt_reg = 0x00290,
125 .set_uproc_interrupt_reg = 0x00290,
126 .clr_uproc_interrupt_reg = 0x00294
127 }
128 },
129};
130
131static const struct ipr_chip_t ipr_chip[] = {
132 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
133 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
 134 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
135 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
136 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
137 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
138};
139
140static int ipr_max_bus_speeds [] = {
141 IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
142};
143
144MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
145MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
146module_param_named(max_speed, ipr_max_speed, uint, 0);
147MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
148module_param_named(log_level, ipr_log_level, uint, 0);
149MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
150module_param_named(testmode, ipr_testmode, int, 0);
151MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
152module_param_named(fastfail, ipr_fastfail, int, 0);
153MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
154module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
155MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
 156module_param_named(enable_cache, ipr_enable_cache, int, 0);
 157MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
 158module_param_named(debug, ipr_debug, int, 0);
 159MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
 160module_param_named(auto_create, ipr_auto_create, int, 0);
 161MODULE_PARM_DESC(auto_create, "Auto-create single device RAID 0 arrays when initialized (default: 1)");
162MODULE_LICENSE("GPL");
163MODULE_VERSION(IPR_DRIVER_VERSION);
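/*
 * Example usage of the module parameters above (illustrative only; the
 * parameter names come from the module_param_named() calls and the module
 * is assumed to load as "ipr"):
 *
 *     modprobe ipr max_speed=2 log_level=3 enable_cache=1 auto_create=0
 *
 * When the driver is built into the kernel, the equivalent boot options
 * take the form ipr.max_speed=2, ipr.log_level=3, and so on.
 */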
164
165/* A constant array of IOASCs/URCs/Error Messages */
166static const
167struct ipr_error_table_t ipr_error_table[] = {
168 {0x00000000, 1, 1,
169 "8155: An unknown error was received"},
170 {0x00330000, 0, 0,
171 "Soft underlength error"},
172 {0x005A0000, 0, 0,
173 "Command to be cancelled not found"},
174 {0x00808000, 0, 0,
175 "Qualified success"},
176 {0x01080000, 1, 1,
177 "FFFE: Soft device bus error recovered by the IOA"},
178 {0x01088100, 0, 1,
179 "4101: Soft device bus fabric error"},
180 {0x01170600, 0, 1,
181 "FFF9: Device sector reassign successful"},
182 {0x01170900, 0, 1,
183 "FFF7: Media error recovered by device rewrite procedures"},
184 {0x01180200, 0, 1,
185 "7001: IOA sector reassignment successful"},
186 {0x01180500, 0, 1,
187 "FFF9: Soft media error. Sector reassignment recommended"},
188 {0x01180600, 0, 1,
189 "FFF7: Media error recovered by IOA rewrite procedures"},
190 {0x01418000, 0, 1,
191 "FF3D: Soft PCI bus error recovered by the IOA"},
192 {0x01440000, 1, 1,
193 "FFF6: Device hardware error recovered by the IOA"},
194 {0x01448100, 0, 1,
195 "FFF6: Device hardware error recovered by the device"},
196 {0x01448200, 1, 1,
197 "FF3D: Soft IOA error recovered by the IOA"},
198 {0x01448300, 0, 1,
199 "FFFA: Undefined device response recovered by the IOA"},
200 {0x014A0000, 1, 1,
201 "FFF6: Device bus error, message or command phase"},
202 {0x015D0000, 0, 1,
203 "FFF6: Failure prediction threshold exceeded"},
204 {0x015D9200, 0, 1,
205 "8009: Impending cache battery pack failure"},
206 {0x02040400, 0, 0,
207 "34FF: Disk device format in progress"},
208 {0x023F0000, 0, 0,
209 "Synchronization required"},
210 {0x024E0000, 0, 0,
211 "No ready, IOA shutdown"},
212 {0x025A0000, 0, 0,
213 "Not ready, IOA has been shutdown"},
214 {0x02670100, 0, 1,
215 "3020: Storage subsystem configuration error"},
216 {0x03110B00, 0, 0,
217 "FFF5: Medium error, data unreadable, recommend reassign"},
218 {0x03110C00, 0, 0,
219 "7000: Medium error, data unreadable, do not reassign"},
220 {0x03310000, 0, 1,
221 "FFF3: Disk media format bad"},
222 {0x04050000, 0, 1,
223 "3002: Addressed device failed to respond to selection"},
224 {0x04080000, 1, 1,
225 "3100: Device bus error"},
226 {0x04080100, 0, 1,
227 "3109: IOA timed out a device command"},
228 {0x04088000, 0, 0,
229 "3120: SCSI bus is not operational"},
230 {0x04088100, 0, 1,
231 "4100: Hard device bus fabric error"},
232 {0x04118000, 0, 1,
233 "9000: IOA reserved area data check"},
234 {0x04118100, 0, 1,
235 "9001: IOA reserved area invalid data pattern"},
236 {0x04118200, 0, 1,
237 "9002: IOA reserved area LRC error"},
238 {0x04320000, 0, 1,
239 "102E: Out of alternate sectors for disk storage"},
240 {0x04330000, 1, 1,
241 "FFF4: Data transfer underlength error"},
242 {0x04338000, 1, 1,
243 "FFF4: Data transfer overlength error"},
244 {0x043E0100, 0, 1,
245 "3400: Logical unit failure"},
246 {0x04408500, 0, 1,
247 "FFF4: Device microcode is corrupt"},
248 {0x04418000, 1, 1,
249 "8150: PCI bus error"},
250 {0x04430000, 1, 0,
251 "Unsupported device bus message received"},
252 {0x04440000, 1, 1,
253 "FFF4: Disk device problem"},
254 {0x04448200, 1, 1,
255 "8150: Permanent IOA failure"},
256 {0x04448300, 0, 1,
257 "3010: Disk device returned wrong response to IOA"},
258 {0x04448400, 0, 1,
259 "8151: IOA microcode error"},
260 {0x04448500, 0, 0,
261 "Device bus status error"},
262 {0x04448600, 0, 1,
263 "8157: IOA error requiring IOA reset to recover"},
264 {0x04490000, 0, 0,
265 "Message reject received from the device"},
266 {0x04449200, 0, 1,
267 "8008: A permanent cache battery pack failure occurred"},
268 {0x0444A000, 0, 1,
269 "9090: Disk unit has been modified after the last known status"},
270 {0x0444A200, 0, 1,
271 "9081: IOA detected device error"},
272 {0x0444A300, 0, 1,
273 "9082: IOA detected device error"},
274 {0x044A0000, 1, 1,
275 "3110: Device bus error, message or command phase"},
276 {0x04670400, 0, 1,
277 "9091: Incorrect hardware configuration change has been detected"},
 278 {0x04678000, 0, 1,
279 "9073: Invalid multi-adapter configuration"},
280 {0x04678100, 0, 1,
281 "4010: Incorrect connection between cascaded expanders"},
282 {0x04678200, 0, 1,
283 "4020: Connections exceed IOA design limits"},
284 {0x04678300, 0, 1,
285 "4030: Incorrect multipath connection"},
286 {0x04679000, 0, 1,
287 "4110: Unsupported enclosure function"},
288 {0x046E0000, 0, 1,
289 "FFF4: Command to logical unit failed"},
290 {0x05240000, 1, 0,
291 "Illegal request, invalid request type or request packet"},
292 {0x05250000, 0, 0,
293 "Illegal request, invalid resource handle"},
 294 {0x05258000, 0, 0,
295 "Illegal request, commands not allowed to this device"},
296 {0x05258100, 0, 0,
297 "Illegal request, command not allowed to a secondary adapter"},
298 {0x05260000, 0, 0,
299 "Illegal request, invalid field in parameter list"},
300 {0x05260100, 0, 0,
301 "Illegal request, parameter not supported"},
302 {0x05260200, 0, 0,
303 "Illegal request, parameter value invalid"},
304 {0x052C0000, 0, 0,
305 "Illegal request, command sequence error"},
 306 {0x052C8000, 1, 0,
307 "Illegal request, dual adapter support not enabled"},
308 {0x06040500, 0, 1,
309 "9031: Array protection temporarily suspended, protection resuming"},
310 {0x06040600, 0, 1,
311 "9040: Array protection temporarily suspended, protection resuming"},
312 {0x06288000, 0, 1,
313 "3140: Device bus not ready to ready transition"},
314 {0x06290000, 0, 1,
315 "FFFB: SCSI bus was reset"},
316 {0x06290500, 0, 0,
317 "FFFE: SCSI bus transition to single ended"},
318 {0x06290600, 0, 0,
319 "FFFE: SCSI bus transition to LVD"},
320 {0x06298000, 0, 1,
321 "FFFB: SCSI bus was reset by another initiator"},
322 {0x063F0300, 0, 1,
323 "3029: A device replacement has occurred"},
324 {0x064C8000, 0, 1,
325 "9051: IOA cache data exists for a missing or failed device"},
 326 {0x064C8100, 0, 1,
327 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
328 {0x06670100, 0, 1,
329 "9025: Disk unit is not supported at its physical location"},
330 {0x06670600, 0, 1,
331 "3020: IOA detected a SCSI bus configuration error"},
332 {0x06678000, 0, 1,
333 "3150: SCSI bus configuration error"},
 334 {0x06678100, 0, 1,
335 "9074: Asymmetric advanced function disk configuration"},
336 {0x06678300, 0, 1,
337 "4040: Incomplete multipath connection between IOA and enclosure"},
338 {0x06678400, 0, 1,
339 "4041: Incomplete multipath connection between enclosure and device"},
340 {0x06678500, 0, 1,
341 "9075: Incomplete multipath connection between IOA and remote IOA"},
342 {0x06678600, 0, 1,
343 "9076: Configuration error, missing remote IOA"},
344 {0x06679100, 0, 1,
345 "4050: Enclosure does not support a required multipath function"},
346 {0x06690200, 0, 1,
347 "9041: Array protection temporarily suspended"},
348 {0x06698200, 0, 1,
349 "9042: Corrupt array parity detected on specified device"},
350 {0x066B0200, 0, 1,
351 "9030: Array no longer protected due to missing or failed disk unit"},
 352 {0x066B8000, 0, 1,
353 "9071: Link operational transition"},
354 {0x066B8100, 0, 1,
355 "9072: Link not operational transition"},
356 {0x066B8200, 0, 1,
357 "9032: Array exposed but still protected"},
358 {0x066B9100, 0, 1,
359 "4061: Multipath redundancy level got better"},
360 {0x066B9200, 0, 1,
361 "4060: Multipath redundancy level got worse"},
362 {0x07270000, 0, 0,
363 "Failure due to other device"},
364 {0x07278000, 0, 1,
365 "9008: IOA does not support functions expected by devices"},
366 {0x07278100, 0, 1,
367 "9010: Cache data associated with attached devices cannot be found"},
368 {0x07278200, 0, 1,
369 "9011: Cache data belongs to devices other than those attached"},
370 {0x07278400, 0, 1,
371 "9020: Array missing 2 or more devices with only 1 device present"},
372 {0x07278500, 0, 1,
373 "9021: Array missing 2 or more devices with 2 or more devices present"},
374 {0x07278600, 0, 1,
375 "9022: Exposed array is missing a required device"},
376 {0x07278700, 0, 1,
377 "9023: Array member(s) not at required physical locations"},
378 {0x07278800, 0, 1,
379 "9024: Array not functional due to present hardware configuration"},
380 {0x07278900, 0, 1,
381 "9026: Array not functional due to present hardware configuration"},
382 {0x07278A00, 0, 1,
383 "9027: Array is missing a device and parity is out of sync"},
384 {0x07278B00, 0, 1,
385 "9028: Maximum number of arrays already exist"},
386 {0x07278C00, 0, 1,
387 "9050: Required cache data cannot be located for a disk unit"},
388 {0x07278D00, 0, 1,
389 "9052: Cache data exists for a device that has been modified"},
390 {0x07278F00, 0, 1,
391 "9054: IOA resources not available due to previous problems"},
392 {0x07279100, 0, 1,
393 "9092: Disk unit requires initialization before use"},
394 {0x07279200, 0, 1,
395 "9029: Incorrect hardware configuration change has been detected"},
396 {0x07279600, 0, 1,
397 "9060: One or more disk pairs are missing from an array"},
398 {0x07279700, 0, 1,
399 "9061: One or more disks are missing from an array"},
400 {0x07279800, 0, 1,
401 "9062: One or more disks are missing from an array"},
402 {0x07279900, 0, 1,
403 "9063: Maximum number of functional arrays has been exceeded"},
404 {0x0B260000, 0, 0,
405 "Aborted command, invalid descriptor"},
406 {0x0B5A0000, 0, 0,
407 "Command terminated by host"}
408};
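/*
 * Reading aid (added commentary, not part of the original table): each entry
 * above pairs an IOASC with two flags and its URC/error text.
 * ipr_get_error() looks entries up by IOASC and falls back to entry 0
 * ("8155: An unknown error was received") for codes not in the table, and
 * ipr_handle_log_data() only logs an error when the matched entry's
 * log_hcam flag is set.
 */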
409
410static const struct ipr_ses_table_entry ipr_ses_table[] = {
411 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
412 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
413 { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
414 { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
415 { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
416 { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
417 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
418 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
419 { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
420 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
421 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
422 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
423 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
424};
425
426/*
427 * Function Prototypes
428 */
429static int ipr_reset_alert(struct ipr_cmnd *);
430static void ipr_process_ccn(struct ipr_cmnd *);
431static void ipr_process_error(struct ipr_cmnd *);
432static void ipr_reset_ioa_job(struct ipr_cmnd *);
433static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
434 enum ipr_shutdown_type);
435
436#ifdef CONFIG_SCSI_IPR_TRACE
437/**
438 * ipr_trc_hook - Add a trace entry to the driver trace
439 * @ipr_cmd: ipr command struct
440 * @type: trace type
441 * @add_data: additional data
442 *
443 * Return value:
444 * none
445 **/
446static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
447 u8 type, u32 add_data)
448{
449 struct ipr_trace_entry *trace_entry;
450 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
451
452 trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
453 trace_entry->time = jiffies;
454 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
455 trace_entry->type = type;
456 trace_entry->cmd_index = ipr_cmd->cmd_index;
457 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
458 trace_entry->u.add_data = add_data;
459}
460#else
461#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
462#endif
463
464/**
465 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
466 * @ipr_cmd: ipr command struct
467 *
468 * Return value:
469 * none
470 **/
471static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
472{
473 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
474 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
475
476 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
477 ioarcb->write_data_transfer_length = 0;
478 ioarcb->read_data_transfer_length = 0;
479 ioarcb->write_ioadl_len = 0;
480 ioarcb->read_ioadl_len = 0;
481 ioasa->ioasc = 0;
482 ioasa->residual_data_len = 0;
483
484 ipr_cmd->scsi_cmd = NULL;
485 ipr_cmd->sense_buffer[0] = 0;
486 ipr_cmd->dma_use_sg = 0;
487}
488
489/**
490 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
491 * @ipr_cmd: ipr command struct
492 *
493 * Return value:
494 * none
495 **/
496static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
497{
498 ipr_reinit_ipr_cmnd(ipr_cmd);
499 ipr_cmd->u.scratch = 0;
500 ipr_cmd->sibling = NULL;
501 init_timer(&ipr_cmd->timer);
502}
503
504/**
505 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
506 * @ioa_cfg: ioa config struct
507 *
508 * Return value:
509 * pointer to ipr command struct
510 **/
511static
512struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
513{
514 struct ipr_cmnd *ipr_cmd;
515
516 ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
517 list_del(&ipr_cmd->queue);
518 ipr_init_ipr_cmnd(ipr_cmd);
519
520 return ipr_cmd;
521}
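/*
 * Note (added commentary): ipr_get_free_ipr_cmnd() pops the head of free_q
 * without checking for an empty list. Callers are assumed to hold the host
 * lock and to request a command block only when one is available; for
 * example, ipr_send_hcam() below issues HCAMs only while
 * ioa_cfg->allow_cmds is set.
 */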
522
523/**
524 * ipr_unmap_sglist - Unmap scatterlist if mapped
525 * @ioa_cfg: ioa config struct
526 * @ipr_cmd: ipr command struct
527 *
528 * Return value:
529 * nothing
530 **/
531static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
532 struct ipr_cmnd *ipr_cmd)
533{
534 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
535
536 if (ipr_cmd->dma_use_sg) {
537 if (scsi_cmd->use_sg > 0) {
538 pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
539 scsi_cmd->use_sg,
540 scsi_cmd->sc_data_direction);
541 } else {
542 pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
543 scsi_cmd->request_bufflen,
544 scsi_cmd->sc_data_direction);
545 }
546 }
547}
548
549/**
550 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
551 * @ioa_cfg: ioa config struct
552 * @clr_ints: interrupts to clear
553 *
554 * This function masks all interrupts on the adapter, then clears the
555 * interrupts specified in the mask
556 *
557 * Return value:
558 * none
559 **/
560static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
561 u32 clr_ints)
562{
563 volatile u32 int_reg;
564
565 /* Stop new interrupts */
566 ioa_cfg->allow_interrupts = 0;
567
568 /* Set interrupt mask to stop all new interrupts */
569 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
570
571 /* Clear any pending interrupts */
572 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
573 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
574}
575
576/**
577 * ipr_save_pcix_cmd_reg - Save PCI-X command register
578 * @ioa_cfg: ioa config struct
579 *
580 * Return value:
581 * 0 on success / -EIO on failure
582 **/
583static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
584{
585 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
586
587 if (pcix_cmd_reg == 0) {
588 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
589 return -EIO;
590 }
591
592 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
593 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
594 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
595 return -EIO;
596 }
597
598 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
599 return 0;
600}
601
602/**
603 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
604 * @ioa_cfg: ioa config struct
605 *
606 * Return value:
607 * 0 on success / -EIO on failure
608 **/
609static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
610{
611 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
612
613 if (pcix_cmd_reg) {
614 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
615 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
616 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
617 return -EIO;
618 }
619 } else {
620 dev_err(&ioa_cfg->pdev->dev,
621 "Failed to setup PCI-X command register\n");
622 return -EIO;
623 }
624
625 return 0;
626}
627
628/**
629 * ipr_scsi_eh_done - mid-layer done function for aborted ops
630 * @ipr_cmd: ipr command struct
631 *
632 * This function is invoked by the interrupt handler for
633 * ops generated by the SCSI mid-layer which are being aborted.
634 *
635 * Return value:
636 * none
637 **/
638static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
639{
640 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
641 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
642
643 scsi_cmd->result |= (DID_ERROR << 16);
644
645 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
646 scsi_cmd->scsi_done(scsi_cmd);
647 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
648}
649
650/**
651 * ipr_fail_all_ops - Fails all outstanding ops.
652 * @ioa_cfg: ioa config struct
653 *
654 * This function fails all outstanding ops.
655 *
656 * Return value:
657 * none
658 **/
659static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
660{
661 struct ipr_cmnd *ipr_cmd, *temp;
662
663 ENTER;
664 list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
665 list_del(&ipr_cmd->queue);
666
667 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
668 ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);
669
670 if (ipr_cmd->scsi_cmd)
671 ipr_cmd->done = ipr_scsi_eh_done;
672
673 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
674 del_timer(&ipr_cmd->timer);
675 ipr_cmd->done(ipr_cmd);
676 }
677
678 LEAVE;
679}
680
681/**
682 * ipr_do_req - Send driver initiated requests.
683 * @ipr_cmd: ipr command struct
684 * @done: done function
685 * @timeout_func: timeout function
686 * @timeout: timeout value
687 *
688 * This function sends the specified command to the adapter with the
689 * timeout given. The done function is invoked on command completion.
690 *
691 * Return value:
692 * none
693 **/
694static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
695 void (*done) (struct ipr_cmnd *),
696 void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
697{
698 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
699
700 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
701
702 ipr_cmd->done = done;
703
704 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
705 ipr_cmd->timer.expires = jiffies + timeout;
706 ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
707
708 add_timer(&ipr_cmd->timer);
709
710 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
711
712 mb();
713 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
714 ioa_cfg->regs.ioarrin_reg);
715}
716
717/**
718 * ipr_internal_cmd_done - Op done function for an internally generated op.
719 * @ipr_cmd: ipr command struct
720 *
721 * This function is the op done function for an internally generated,
722 * blocking op. It simply wakes the sleeping thread.
723 *
724 * Return value:
725 * none
726 **/
727static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
728{
729 if (ipr_cmd->sibling)
730 ipr_cmd->sibling = NULL;
731 else
732 complete(&ipr_cmd->completion);
733}
734
735/**
736 * ipr_send_blocking_cmd - Send command and sleep on its completion.
737 * @ipr_cmd: ipr command struct
738 * @timeout_func: function to invoke if command times out
739 * @timeout: timeout
740 *
741 * Return value:
742 * none
743 **/
744static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
745 void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
746 u32 timeout)
747{
748 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
749
750 init_completion(&ipr_cmd->completion);
751 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
752
753 spin_unlock_irq(ioa_cfg->host->host_lock);
754 wait_for_completion(&ipr_cmd->completion);
755 spin_lock_irq(ioa_cfg->host->host_lock);
756}
757
758/**
759 * ipr_send_hcam - Send an HCAM to the adapter.
760 * @ioa_cfg: ioa config struct
761 * @type: HCAM type
762 * @hostrcb: hostrcb struct
763 *
764 * This function will send a Host Controlled Async command to the adapter.
765 * If HCAMs are currently not allowed to be issued to the adapter, it will
766 * place the hostrcb on the free queue.
767 *
768 * Return value:
769 * none
770 **/
771static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
772 struct ipr_hostrcb *hostrcb)
773{
774 struct ipr_cmnd *ipr_cmd;
775 struct ipr_ioarcb *ioarcb;
776
777 if (ioa_cfg->allow_cmds) {
778 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
779 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
780 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
781
782 ipr_cmd->u.hostrcb = hostrcb;
783 ioarcb = &ipr_cmd->ioarcb;
784
785 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
786 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
787 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
788 ioarcb->cmd_pkt.cdb[1] = type;
789 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
790 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
791
792 ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
793 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
794 ipr_cmd->ioadl[0].flags_and_data_len =
795 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
796 ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);
797
798 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
799 ipr_cmd->done = ipr_process_ccn;
800 else
801 ipr_cmd->done = ipr_process_error;
802
803 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
804
805 mb();
806 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
807 ioa_cfg->regs.ioarrin_reg);
808 } else {
809 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
810 }
811}
812
813/**
814 * ipr_init_res_entry - Initialize a resource entry struct.
815 * @res: resource entry struct
816 *
817 * Return value:
818 * none
819 **/
820static void ipr_init_res_entry(struct ipr_resource_entry *res)
821{
 822 res->needs_sync_complete = 0;
823 res->in_erp = 0;
824 res->add_to_ml = 0;
825 res->del_from_ml = 0;
826 res->resetting_device = 0;
827 res->sdev = NULL;
828}
829
830/**
831 * ipr_handle_config_change - Handle a config change from the adapter
832 * @ioa_cfg: ioa config struct
833 * @hostrcb: hostrcb
834 *
835 * Return value:
836 * none
837 **/
838static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
839 struct ipr_hostrcb *hostrcb)
840{
841 struct ipr_resource_entry *res = NULL;
842 struct ipr_config_table_entry *cfgte;
843 u32 is_ndn = 1;
844
845 cfgte = &hostrcb->hcam.u.ccn.cfgte;
846
847 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
848 if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
849 sizeof(cfgte->res_addr))) {
850 is_ndn = 0;
851 break;
852 }
853 }
854
855 if (is_ndn) {
856 if (list_empty(&ioa_cfg->free_res_q)) {
857 ipr_send_hcam(ioa_cfg,
858 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
859 hostrcb);
860 return;
861 }
862
863 res = list_entry(ioa_cfg->free_res_q.next,
864 struct ipr_resource_entry, queue);
865
866 list_del(&res->queue);
867 ipr_init_res_entry(res);
868 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
869 }
870
871 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
872
873 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
874 if (res->sdev) {
 875 res->del_from_ml = 1;
 876 res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
877 if (ioa_cfg->allow_ml_add_del)
878 schedule_work(&ioa_cfg->work_q);
879 } else
880 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
881 } else if (!res->sdev) {
882 res->add_to_ml = 1;
883 if (ioa_cfg->allow_ml_add_del)
884 schedule_work(&ioa_cfg->work_q);
885 }
886
887 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
888}
889
890/**
891 * ipr_process_ccn - Op done function for a CCN.
892 * @ipr_cmd: ipr command struct
893 *
894 * This function is the op done function for a configuration
895 * change notification host controlled async from the adapter.
896 *
897 * Return value:
898 * none
899 **/
900static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
901{
902 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
903 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
904 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
905
906 list_del(&hostrcb->queue);
907 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
908
909 if (ioasc) {
910 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
911 dev_err(&ioa_cfg->pdev->dev,
912 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
913
914 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
915 } else {
916 ipr_handle_config_change(ioa_cfg, hostrcb);
917 }
918}
919
920/**
921 * ipr_log_vpd - Log the passed VPD to the error log.
 922 * @vpd: vendor/product id/sn struct
923 *
924 * Return value:
925 * none
926 **/
 927static void ipr_log_vpd(struct ipr_vpd *vpd)
 928{
 929 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
 930 + IPR_SERIAL_NUM_LEN];
 931
 932 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
 933 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
 934 IPR_PROD_ID_LEN);
 935 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
 936 ipr_err("Vendor/Product ID: %s\n", buffer);
 937
 938 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
939 buffer[IPR_SERIAL_NUM_LEN] = '\0';
940 ipr_err(" Serial Number: %s\n", buffer);
941}
942
 943/**
944 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
945 * @vpd: vendor/product id/sn/wwn struct
946 *
947 * Return value:
948 * none
949 **/
950static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
951{
952 ipr_log_vpd(&vpd->vpd);
953 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
954 be32_to_cpu(vpd->wwid[1]));
955}
956
957/**
958 * ipr_log_enhanced_cache_error - Log a cache error.
959 * @ioa_cfg: ioa config struct
960 * @hostrcb: hostrcb struct
961 *
962 * Return value:
963 * none
964 **/
965static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
966 struct ipr_hostrcb *hostrcb)
967{
968 struct ipr_hostrcb_type_12_error *error =
969 &hostrcb->hcam.u.error.u.type_12_error;
970
971 ipr_err("-----Current Configuration-----\n");
972 ipr_err("Cache Directory Card Information:\n");
973 ipr_log_ext_vpd(&error->ioa_vpd);
974 ipr_err("Adapter Card Information:\n");
975 ipr_log_ext_vpd(&error->cfc_vpd);
976
977 ipr_err("-----Expected Configuration-----\n");
978 ipr_err("Cache Directory Card Information:\n");
979 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
980 ipr_err("Adapter Card Information:\n");
981 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
982
983 ipr_err("Additional IOA Data: %08X %08X %08X\n",
984 be32_to_cpu(error->ioa_data[0]),
985 be32_to_cpu(error->ioa_data[1]),
986 be32_to_cpu(error->ioa_data[2]));
987}
988
989/**
990 * ipr_log_cache_error - Log a cache error.
991 * @ioa_cfg: ioa config struct
992 * @hostrcb: hostrcb struct
993 *
994 * Return value:
995 * none
996 **/
997static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
998 struct ipr_hostrcb *hostrcb)
999{
1000 struct ipr_hostrcb_type_02_error *error =
1001 &hostrcb->hcam.u.error.u.type_02_error;
1002
1003 ipr_err("-----Current Configuration-----\n");
1004 ipr_err("Cache Directory Card Information:\n");
 1005 ipr_log_vpd(&error->ioa_vpd);
 1006 ipr_err("Adapter Card Information:\n");
 1007 ipr_log_vpd(&error->cfc_vpd);
 1008
 1009 ipr_err("-----Expected Configuration-----\n");
 1010 ipr_err("Cache Directory Card Information:\n");
 1011 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
 1012 ipr_err("Adapter Card Information:\n");
 1013 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1014
1015 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1016 be32_to_cpu(error->ioa_data[0]),
1017 be32_to_cpu(error->ioa_data[1]),
1018 be32_to_cpu(error->ioa_data[2]));
1019}
1020
 1021/**
1022 * ipr_log_enhanced_config_error - Log a configuration error.
1023 * @ioa_cfg: ioa config struct
1024 * @hostrcb: hostrcb struct
1025 *
1026 * Return value:
1027 * none
1028 **/
1029static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1030 struct ipr_hostrcb *hostrcb)
1031{
1032 int errors_logged, i;
1033 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1034 struct ipr_hostrcb_type_13_error *error;
1035
1036 error = &hostrcb->hcam.u.error.u.type_13_error;
1037 errors_logged = be32_to_cpu(error->errors_logged);
1038
1039 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1040 be32_to_cpu(error->errors_detected), errors_logged);
1041
1042 dev_entry = error->dev;
1043
1044 for (i = 0; i < errors_logged; i++, dev_entry++) {
1045 ipr_err_separator;
1046
1047 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1048 ipr_log_ext_vpd(&dev_entry->vpd);
1049
1050 ipr_err("-----New Device Information-----\n");
1051 ipr_log_ext_vpd(&dev_entry->new_vpd);
1052
1053 ipr_err("Cache Directory Card Information:\n");
1054 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1055
1056 ipr_err("Adapter Card Information:\n");
1057 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1058 }
1059}
1060
1061/**
1062 * ipr_log_config_error - Log a configuration error.
1063 * @ioa_cfg: ioa config struct
1064 * @hostrcb: hostrcb struct
1065 *
1066 * Return value:
1067 * none
1068 **/
1069static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1070 struct ipr_hostrcb *hostrcb)
1071{
1072 int errors_logged, i;
1073 struct ipr_hostrcb_device_data_entry *dev_entry;
1074 struct ipr_hostrcb_type_03_error *error;
1075
1076 error = &hostrcb->hcam.u.error.u.type_03_error;
1077 errors_logged = be32_to_cpu(error->errors_logged);
1078
1079 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1080 be32_to_cpu(error->errors_detected), errors_logged);
1081
 1082 dev_entry = error->dev;
 1083
 1084 for (i = 0; i < errors_logged; i++, dev_entry++) {
 1085 ipr_err_separator;
 1086
 1087 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
 1088 ipr_log_vpd(&dev_entry->vpd);
 1089
 1090 ipr_err("-----New Device Information-----\n");
 1091 ipr_log_vpd(&dev_entry->new_vpd);
 1092
 1093 ipr_err("Cache Directory Card Information:\n");
 1094 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
 1095
 1096 ipr_err("Adapter Card Information:\n");
 1097 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1098
1099 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1100 be32_to_cpu(dev_entry->ioa_data[0]),
1101 be32_to_cpu(dev_entry->ioa_data[1]),
1102 be32_to_cpu(dev_entry->ioa_data[2]),
1103 be32_to_cpu(dev_entry->ioa_data[3]),
1104 be32_to_cpu(dev_entry->ioa_data[4]));
1105 }
1106}
1107
 1108/**
1109 * ipr_log_enhanced_array_error - Log an array configuration error.
1110 * @ioa_cfg: ioa config struct
1111 * @hostrcb: hostrcb struct
1112 *
1113 * Return value:
1114 * none
1115 **/
1116static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1117 struct ipr_hostrcb *hostrcb)
1118{
1119 int i, num_entries;
1120 struct ipr_hostrcb_type_14_error *error;
1121 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1122 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1123
1124 error = &hostrcb->hcam.u.error.u.type_14_error;
1125
1126 ipr_err_separator;
1127
1128 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1129 error->protection_level,
1130 ioa_cfg->host->host_no,
1131 error->last_func_vset_res_addr.bus,
1132 error->last_func_vset_res_addr.target,
1133 error->last_func_vset_res_addr.lun);
1134
1135 ipr_err_separator;
1136
1137 array_entry = error->array_member;
1138 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1139 sizeof(error->array_member));
1140
1141 for (i = 0; i < num_entries; i++, array_entry++) {
1142 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1143 continue;
1144
1145 if (be32_to_cpu(error->exposed_mode_adn) == i)
1146 ipr_err("Exposed Array Member %d:\n", i);
1147 else
1148 ipr_err("Array Member %d:\n", i);
1149
1150 ipr_log_ext_vpd(&array_entry->vpd);
1151 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1152 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1153 "Expected Location");
1154
1155 ipr_err_separator;
1156 }
1157}
1158
1159/**
1160 * ipr_log_array_error - Log an array configuration error.
1161 * @ioa_cfg: ioa config struct
1162 * @hostrcb: hostrcb struct
1163 *
1164 * Return value:
1165 * none
1166 **/
1167static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1168 struct ipr_hostrcb *hostrcb)
1169{
1170 int i;
1171 struct ipr_hostrcb_type_04_error *error;
1172 struct ipr_hostrcb_array_data_entry *array_entry;
1173 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1174
1175 error = &hostrcb->hcam.u.error.u.type_04_error;
1176
1177 ipr_err_separator;
1178
1179 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1180 error->protection_level,
1181 ioa_cfg->host->host_no,
1182 error->last_func_vset_res_addr.bus,
1183 error->last_func_vset_res_addr.target,
1184 error->last_func_vset_res_addr.lun);
1185
1186 ipr_err_separator;
1187
1188 array_entry = error->array_member;
1189
1190 for (i = 0; i < 18; i++) {
 1191 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
 1192 continue;
 1193
 1194 if (be32_to_cpu(error->exposed_mode_adn) == i)
 1195 ipr_err("Exposed Array Member %d:\n", i);
 1196 else
 1197 ipr_err("Array Member %d:\n", i);
 1198
 1199 ipr_log_vpd(&array_entry->vpd);
 1200
 1201 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
 1202 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
 1203 "Expected Location");
1204
1205 ipr_err_separator;
1206
1207 if (i == 9)
1208 array_entry = error->array_member2;
1209 else
1210 array_entry++;
1211 }
1212}
1213
1214/**
 1215 * ipr_log_hex_data - Log additional hex IOA error data.
 1216 * @data: IOA error data
 1217 * @len: data length
1218 *
1219 * Return value:
1220 * none
1221 **/
 1222static void ipr_log_hex_data(u32 *data, int len)
 1223{
 1224 int i;
 1225
 1226 if (len == 0)
 1227 return;
 1228
 1229 for (i = 0; i < len / 4; i += 4) {
 1230 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
 1231 be32_to_cpu(data[i]),
 1232 be32_to_cpu(data[i+1]),
 1233 be32_to_cpu(data[i+2]),
 1234 be32_to_cpu(data[i+3]));
1235 }
1236}
1237
 1238/**
1239 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1240 * @ioa_cfg: ioa config struct
1241 * @hostrcb: hostrcb struct
1242 *
1243 * Return value:
1244 * none
1245 **/
1246static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1247 struct ipr_hostrcb *hostrcb)
1248{
1249 struct ipr_hostrcb_type_17_error *error;
1250
1251 error = &hostrcb->hcam.u.error.u.type_17_error;
1252 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1253
1254 ipr_err("%s\n", error->failure_reason);
1255 ipr_err("Remote Adapter VPD:\n");
1256 ipr_log_ext_vpd(&error->vpd);
1257 ipr_log_hex_data(error->data,
1258 be32_to_cpu(hostrcb->hcam.length) -
1259 (offsetof(struct ipr_hostrcb_error, u) +
1260 offsetof(struct ipr_hostrcb_type_17_error, data)));
1261}
1262
 1263/**
1264 * ipr_log_dual_ioa_error - Log a dual adapter error.
1265 * @ioa_cfg: ioa config struct
1266 * @hostrcb: hostrcb struct
1267 *
1268 * Return value:
1269 * none
1270 **/
1271static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1272 struct ipr_hostrcb *hostrcb)
1273{
1274 struct ipr_hostrcb_type_07_error *error;
1275
1276 error = &hostrcb->hcam.u.error.u.type_07_error;
1277 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1278
1279 ipr_err("%s\n", error->failure_reason);
1280 ipr_err("Remote Adapter VPD:\n");
1281 ipr_log_vpd(&error->vpd);
1282 ipr_log_hex_data(error->data,
1283 be32_to_cpu(hostrcb->hcam.length) -
1284 (offsetof(struct ipr_hostrcb_error, u) +
1285 offsetof(struct ipr_hostrcb_type_07_error, data)));
1286}
1287
1288/**
1289 * ipr_log_generic_error - Log an adapter error.
1290 * @ioa_cfg: ioa config struct
1291 * @hostrcb: hostrcb struct
1292 *
1293 * Return value:
1294 * none
1295 **/
1296static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
1297 struct ipr_hostrcb *hostrcb)
1298{
1299 ipr_log_hex_data(hostrcb->hcam.u.raw.data,
1300 be32_to_cpu(hostrcb->hcam.length));
1301}
1302
1303/**
 1304 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 1305 * @ioasc: IOASC
 1306 *
 1307 * This function will return the index into the ipr_error_table
1308 * for the specified IOASC. If the IOASC is not in the table,
1309 * 0 will be returned, which points to the entry used for unknown errors.
1310 *
1311 * Return value:
1312 * index into the ipr_error_table
1313 **/
1314static u32 ipr_get_error(u32 ioasc)
1315{
1316 int i;
1317
1318 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
1319 if (ipr_error_table[i].ioasc == ioasc)
1320 return i;
1321
1322 return 0;
1323}
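/*
 * Illustrative example (not from the original source): a recovered error
 * such as IOASC 0x01170600 resolves to the "FFF9: Device sector reassign
 * successful" entry in ipr_error_table, while an IOASC that is not listed
 * resolves to index 0, the "unknown error" entry.
 */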
1324
1325/**
1326 * ipr_handle_log_data - Log an adapter error.
1327 * @ioa_cfg: ioa config struct
1328 * @hostrcb: hostrcb struct
1329 *
1330 * This function logs an adapter error to the system.
1331 *
1332 * Return value:
1333 * none
1334 **/
1335static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
1336 struct ipr_hostrcb *hostrcb)
1337{
1338 u32 ioasc;
1339 int error_index;
1340
1341 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
1342 return;
1343
1344 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
1345 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
1346
1347 ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
1348
1349 if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
1350 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
1351 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
1352 scsi_report_bus_reset(ioa_cfg->host,
1353 hostrcb->hcam.u.error.failing_dev_res_addr.bus);
1354 }
1355
1356 error_index = ipr_get_error(ioasc);
1357
1358 if (!ipr_error_table[error_index].log_hcam)
1359 return;
1360
1361 if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
 1362 ipr_ra_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
 1363 "%s\n", ipr_error_table[error_index].error);
1364 } else {
1365 dev_err(&ioa_cfg->pdev->dev, "%s\n",
1366 ipr_error_table[error_index].error);
1367 }
1368
1369 /* Set indication we have logged an error */
1370 ioa_cfg->errors_logged++;
1371
1372 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
1373 return;
 1374 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
 1375 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
 1376
 1377 switch (hostrcb->hcam.overlay_id) {
1378 case IPR_HOST_RCB_OVERLAY_ID_2:
1379 ipr_log_cache_error(ioa_cfg, hostrcb);
1380 break;
1381 case IPR_HOST_RCB_OVERLAY_ID_3:
1382 ipr_log_config_error(ioa_cfg, hostrcb);
1383 break;
1384 case IPR_HOST_RCB_OVERLAY_ID_4:
1385 case IPR_HOST_RCB_OVERLAY_ID_6:
1386 ipr_log_array_error(ioa_cfg, hostrcb);
1387 break;
 1388 case IPR_HOST_RCB_OVERLAY_ID_7:
 1389 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
 1390 break;
 1391 case IPR_HOST_RCB_OVERLAY_ID_12:
1392 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
1393 break;
1394 case IPR_HOST_RCB_OVERLAY_ID_13:
1395 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
1396 break;
1397 case IPR_HOST_RCB_OVERLAY_ID_14:
1398 case IPR_HOST_RCB_OVERLAY_ID_16:
1399 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
1400 break;
1401 case IPR_HOST_RCB_OVERLAY_ID_17:
1402 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
1403 break;
 1404 case IPR_HOST_RCB_OVERLAY_ID_1:
 1405 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
 1406 default:
 1407 ipr_log_generic_error(ioa_cfg, hostrcb);
1408 break;
1409 }
1410}
1411
1412/**
1413 * ipr_process_error - Op done function for an adapter error log.
1414 * @ipr_cmd: ipr command struct
1415 *
1416 * This function is the op done function for an error log host
1417 * controlled async from the adapter. It will log the error and
1418 * send the HCAM back to the adapter.
1419 *
1420 * Return value:
1421 * none
1422 **/
1423static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
1424{
1425 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1426 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1427 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
1428
1429 list_del(&hostrcb->queue);
1430 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1431
1432 if (!ioasc) {
1433 ipr_handle_log_data(ioa_cfg, hostrcb);
1434 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
1435 dev_err(&ioa_cfg->pdev->dev,
1436 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1437 }
1438
1439 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
1440}
1441
1442/**
1443 * ipr_timeout - An internally generated op has timed out.
1444 * @ipr_cmd: ipr command struct
1445 *
1446 * This function blocks host requests and initiates an
1447 * adapter reset.
1448 *
1449 * Return value:
1450 * none
1451 **/
1452static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
1453{
1454 unsigned long lock_flags = 0;
1455 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1456
1457 ENTER;
1458 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1459
1460 ioa_cfg->errors_logged++;
1461 dev_err(&ioa_cfg->pdev->dev,
1462 "Adapter being reset due to command timeout.\n");
1463
1464 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1465 ioa_cfg->sdt_state = GET_DUMP;
1466
1467 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
1468 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1469
1470 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1471 LEAVE;
1472}
1473
1474/**
1475 * ipr_oper_timeout - Adapter timed out transitioning to operational
1476 * @ipr_cmd: ipr command struct
1477 *
1478 * This function blocks host requests and initiates an
1479 * adapter reset.
1480 *
1481 * Return value:
1482 * none
1483 **/
1484static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
1485{
1486 unsigned long lock_flags = 0;
1487 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1488
1489 ENTER;
1490 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1491
1492 ioa_cfg->errors_logged++;
1493 dev_err(&ioa_cfg->pdev->dev,
1494 "Adapter timed out transitioning to operational.\n");
1495
1496 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1497 ioa_cfg->sdt_state = GET_DUMP;
1498
1499 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
1500 if (ipr_fastfail)
1501 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
1502 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1503 }
1504
1505 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1506 LEAVE;
1507}
1508
1509/**
1510 * ipr_reset_reload - Reset/Reload the IOA
1511 * @ioa_cfg: ioa config struct
1512 * @shutdown_type: shutdown type
1513 *
1514 * This function resets the adapter and re-initializes it.
1515 * This function assumes that all new host commands have been stopped.
1516 * Return value:
1517 * SUCCESS / FAILED
1518 **/
1519static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
1520 enum ipr_shutdown_type shutdown_type)
1521{
1522 if (!ioa_cfg->in_reset_reload)
1523 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
1524
1525 spin_unlock_irq(ioa_cfg->host->host_lock);
1526 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
1527 spin_lock_irq(ioa_cfg->host->host_lock);
1528
 1529 /* If we got hit with a host reset while we were already resetting
 1530 the adapter for some reason and that reset failed, fail the host reset. */
1531 if (ioa_cfg->ioa_is_dead) {
1532 ipr_trace;
1533 return FAILED;
1534 }
1535
1536 return SUCCESS;
1537}
1538
1539/**
1540 * ipr_find_ses_entry - Find matching SES in SES table
1541 * @res: resource entry struct of SES
1542 *
1543 * Return value:
1544 * pointer to SES table entry / NULL on failure
1545 **/
1546static const struct ipr_ses_table_entry *
1547ipr_find_ses_entry(struct ipr_resource_entry *res)
1548{
1549 int i, j, matches;
1550 const struct ipr_ses_table_entry *ste = ipr_ses_table;
1551
1552 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
1553 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
1554 if (ste->compare_product_id_byte[j] == 'X') {
1555 if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
1556 matches++;
1557 else
1558 break;
1559 } else
1560 matches++;
1561 }
1562
1563 if (matches == IPR_PROD_ID_LEN)
1564 return ste;
1565 }
1566
1567 return NULL;
1568}
1569
1570/**
1571 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
1572 * @ioa_cfg: ioa config struct
1573 * @bus: SCSI bus
1574 * @bus_width: bus width
1575 *
1576 * Return value:
1577 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
1578 * For a 2-byte wide SCSI bus, the maximum transfer speed is
1579 * twice the maximum transfer rate (e.g. for a wide enabled bus,
1580 * max 160MHz = max 320MB/sec).
1581 **/
1582static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
1583{
1584 struct ipr_resource_entry *res;
1585 const struct ipr_ses_table_entry *ste;
1586 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
1587
1588 /* Loop through each config table entry in the config table buffer */
1589 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1590 if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
1591 continue;
1592
1593 if (bus != res->cfgte.res_addr.bus)
1594 continue;
1595
1596 if (!(ste = ipr_find_ses_entry(res)))
1597 continue;
1598
1599 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
1600 }
1601
1602 return max_xfer_rate;
1603}
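/*
 * Worked example (added for clarity, not from the original source): for a
 * 16-bit wide bus whose slowest attached SES limits it to 160 MB/s, the
 * loop above yields (160 * 10) / (16 / 8) = 800, i.e. 80 MHz in the 100 kHz
 * units described in the function header, which corresponds to 160 MB/s on
 * a wide bus.
 */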
1604
1605/**
1606 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
1607 * @ioa_cfg: ioa config struct
1608 * @max_delay: max delay in micro-seconds to wait
1609 *
1610 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
1611 *
1612 * Return value:
1613 * 0 on success / other on failure
1614 **/
1615static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
1616{
1617 volatile u32 pcii_reg;
1618 int delay = 1;
1619
1620 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
1621 while (delay < max_delay) {
1622 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
1623
1624 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
1625 return 0;
1626
1627 /* udelay cannot be used if delay is more than a few milliseconds */
1628 if ((delay / 1000) > MAX_UDELAY_MS)
1629 mdelay(delay / 1000);
1630 else
1631 udelay(delay);
1632
1633 delay += delay;
1634 }
1635 return -EIO;
1636}
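/*
 * Timing note (added commentary): the loop above uses exponential backoff,
 * doubling the delay each pass (1, 2, 4, ... microseconds) until max_delay
 * is reached, and switches from udelay() to mdelay() once a single step
 * exceeds MAX_UDELAY_MS milliseconds.
 */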
1637
1638/**
1639 * ipr_get_ldump_data_section - Dump IOA memory
1640 * @ioa_cfg: ioa config struct
1641 * @start_addr: adapter address to dump
1642 * @dest: destination kernel buffer
1643 * @length_in_words: length to dump in 4 byte words
1644 *
1645 * Return value:
1646 * 0 on success / -EIO on failure
1647 **/
1648static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1649 u32 start_addr,
1650 __be32 *dest, u32 length_in_words)
1651{
1652 volatile u32 temp_pcii_reg;
1653 int i, delay = 0;
1654
1655 /* Write IOA interrupt reg starting LDUMP state */
1656 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
1657 ioa_cfg->regs.set_uproc_interrupt_reg);
1658
1659 /* Wait for IO debug acknowledge */
1660 if (ipr_wait_iodbg_ack(ioa_cfg,
1661 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
1662 dev_err(&ioa_cfg->pdev->dev,
1663 "IOA dump long data transfer timeout\n");
1664 return -EIO;
1665 }
1666
1667 /* Signal LDUMP interlocked - clear IO debug ack */
1668 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1669 ioa_cfg->regs.clr_interrupt_reg);
1670
1671 /* Write Mailbox with starting address */
1672 writel(start_addr, ioa_cfg->ioa_mailbox);
1673
1674 /* Signal address valid - clear IOA Reset alert */
1675 writel(IPR_UPROCI_RESET_ALERT,
1676 ioa_cfg->regs.clr_uproc_interrupt_reg);
1677
1678 for (i = 0; i < length_in_words; i++) {
1679 /* Wait for IO debug acknowledge */
1680 if (ipr_wait_iodbg_ack(ioa_cfg,
1681 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
1682 dev_err(&ioa_cfg->pdev->dev,
1683 "IOA dump short data transfer timeout\n");
1684 return -EIO;
1685 }
1686
1687 /* Read data from mailbox and increment destination pointer */
1688 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
1689 dest++;
1690
1691 /* For all but the last word of data, signal data received */
1692 if (i < (length_in_words - 1)) {
1693 /* Signal dump data received - Clear IO debug Ack */
1694 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1695 ioa_cfg->regs.clr_interrupt_reg);
1696 }
1697 }
1698
1699 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
1700 writel(IPR_UPROCI_RESET_ALERT,
1701 ioa_cfg->regs.set_uproc_interrupt_reg);
1702
1703 writel(IPR_UPROCI_IO_DEBUG_ALERT,
1704 ioa_cfg->regs.clr_uproc_interrupt_reg);
1705
1706 /* Signal dump data received - Clear IO debug Ack */
1707 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1708 ioa_cfg->regs.clr_interrupt_reg);
1709
1710 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
1711 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
1712 temp_pcii_reg =
1713 readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
1714
1715 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
1716 return 0;
1717
1718 udelay(10);
1719 delay += 10;
1720 }
1721
1722 return 0;
1723}
1724
1725#ifdef CONFIG_SCSI_IPR_DUMP
1726/**
1727 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
1728 * @ioa_cfg: ioa config struct
1729 * @pci_address: adapter address
1730 * @length: length of data to copy
1731 *
1732 * Copy data from PCI adapter to kernel buffer.
1733 * Note: length MUST be a 4 byte multiple
1734 * Return value:
1735 * 0 on success / other on failure
1736 **/
1737static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
1738 unsigned long pci_address, u32 length)
1739{
1740 int bytes_copied = 0;
1741 int cur_len, rc, rem_len, rem_page_len;
1742 __be32 *page;
1743 unsigned long lock_flags = 0;
1744 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
1745
1746 while (bytes_copied < length &&
1747 (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
1748 if (ioa_dump->page_offset >= PAGE_SIZE ||
1749 ioa_dump->page_offset == 0) {
1750 page = (__be32 *)__get_free_page(GFP_ATOMIC);
1751
1752 if (!page) {
1753 ipr_trace;
1754 return bytes_copied;
1755 }
1756
1757 ioa_dump->page_offset = 0;
1758 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
1759 ioa_dump->next_page_index++;
1760 } else
1761 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
1762
1763 rem_len = length - bytes_copied;
1764 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
1765 cur_len = min(rem_len, rem_page_len);
1766
1767 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1768 if (ioa_cfg->sdt_state == ABORT_DUMP) {
1769 rc = -EIO;
1770 } else {
1771 rc = ipr_get_ldump_data_section(ioa_cfg,
1772 pci_address + bytes_copied,
1773 &page[ioa_dump->page_offset / 4],
1774 (cur_len / sizeof(u32)));
1775 }
1776 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1777
1778 if (!rc) {
1779 ioa_dump->page_offset += cur_len;
1780 bytes_copied += cur_len;
1781 } else {
1782 ipr_trace;
1783 break;
1784 }
1785 schedule();
1786 }
1787
1788 return bytes_copied;
1789}
1790
1791/**
1792 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
1793 * @hdr: dump entry header struct
1794 *
1795 * Return value:
1796 * nothing
1797 **/
1798static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
1799{
1800 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
1801 hdr->num_elems = 1;
1802 hdr->offset = sizeof(*hdr);
1803 hdr->status = IPR_DUMP_STATUS_SUCCESS;
1804}
1805
1806/**
1807 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
1808 * @ioa_cfg: ioa config struct
1809 * @driver_dump: driver dump struct
1810 *
1811 * Return value:
1812 * nothing
1813 **/
1814static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
1815 struct ipr_driver_dump *driver_dump)
1816{
1817 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
1818
1819 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
1820 driver_dump->ioa_type_entry.hdr.len =
1821 sizeof(struct ipr_dump_ioa_type_entry) -
1822 sizeof(struct ipr_dump_entry_header);
1823 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1824 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
1825 driver_dump->ioa_type_entry.type = ioa_cfg->type;
1826 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
1827 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
1828 ucode_vpd->minor_release[1];
1829 driver_dump->hdr.num_entries++;
1830}
1831
1832/**
1833 * ipr_dump_version_data - Fill in the driver version in the dump.
1834 * @ioa_cfg: ioa config struct
1835 * @driver_dump: driver dump struct
1836 *
1837 * Return value:
1838 * nothing
1839 **/
1840static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
1841 struct ipr_driver_dump *driver_dump)
1842{
1843 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
1844 driver_dump->version_entry.hdr.len =
1845 sizeof(struct ipr_dump_version_entry) -
1846 sizeof(struct ipr_dump_entry_header);
1847 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1848 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
1849 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
1850 driver_dump->hdr.num_entries++;
1851}
1852
1853/**
1854 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
1855 * @ioa_cfg: ioa config struct
1856 * @driver_dump: driver dump struct
1857 *
1858 * Return value:
1859 * nothing
1860 **/
1861static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
1862 struct ipr_driver_dump *driver_dump)
1863{
1864 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
1865 driver_dump->trace_entry.hdr.len =
1866 sizeof(struct ipr_dump_trace_entry) -
1867 sizeof(struct ipr_dump_entry_header);
1868 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1869 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
1870 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
1871 driver_dump->hdr.num_entries++;
1872}
1873
1874/**
1875 * ipr_dump_location_data - Fill in the IOA location in the dump.
1876 * @ioa_cfg: ioa config struct
1877 * @driver_dump: driver dump struct
1878 *
1879 * Return value:
1880 * nothing
1881 **/
1882static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
1883 struct ipr_driver_dump *driver_dump)
1884{
1885 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
1886 driver_dump->location_entry.hdr.len =
1887 sizeof(struct ipr_dump_location_entry) -
1888 sizeof(struct ipr_dump_entry_header);
1889 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1890 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
1891 strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
1892 driver_dump->hdr.num_entries++;
1893}
1894
1895/**
1896 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
1897 * @ioa_cfg: ioa config struct
1898 * @dump: dump struct
1899 *
1900 * Return value:
1901 * nothing
1902 **/
1903static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
1904{
1905 unsigned long start_addr, sdt_word;
1906 unsigned long lock_flags = 0;
1907 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
1908 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
1909 u32 num_entries, start_off, end_off;
1910 u32 bytes_to_copy, bytes_copied, rc;
1911 struct ipr_sdt *sdt;
1912 int i;
1913
1914 ENTER;
1915
1916 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1917
1918 if (ioa_cfg->sdt_state != GET_DUMP) {
1919 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1920 return;
1921 }
1922
1923 start_addr = readl(ioa_cfg->ioa_mailbox);
1924
1925 if (!ipr_sdt_is_fmt2(start_addr)) {
1926 dev_err(&ioa_cfg->pdev->dev,
1927 "Invalid dump table format: %lx\n", start_addr);
1928 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1929 return;
1930 }
1931
1932 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
1933
1934 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
1935
1936 /* Initialize the overall dump header */
1937 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
1938 driver_dump->hdr.num_entries = 1;
1939 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
1940 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
1941 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
1942 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
1943
1944 ipr_dump_version_data(ioa_cfg, driver_dump);
1945 ipr_dump_location_data(ioa_cfg, driver_dump);
1946 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
1947 ipr_dump_trace_data(ioa_cfg, driver_dump);
1948
1949 /* Update dump_header */
1950 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
1951
1952 /* IOA Dump entry */
1953 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1954 ioa_dump->format = IPR_SDT_FMT2;
1955 ioa_dump->hdr.len = 0;
1956 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1957 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
1958
 1959 	/* The first entries in the SDT are actually a list of dump addresses
 1960 	 and lengths used to gather the real dump data. sdt points to the
 1961 	 IOA-generated dump table, and dump data will be extracted based
 1962 	 on the entries in this table */
1963 sdt = &ioa_dump->sdt;
1964
1965 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
1966 sizeof(struct ipr_sdt) / sizeof(__be32));
1967
1968 /* Smart Dump table is ready to use and the first entry is valid */
1969 if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
1970 dev_err(&ioa_cfg->pdev->dev,
1971 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
1972 rc, be32_to_cpu(sdt->hdr.state));
1973 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
1974 ioa_cfg->sdt_state = DUMP_OBTAINED;
1975 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1976 return;
1977 }
1978
1979 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
1980
1981 if (num_entries > IPR_NUM_SDT_ENTRIES)
1982 num_entries = IPR_NUM_SDT_ENTRIES;
1983
1984 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1985
1986 for (i = 0; i < num_entries; i++) {
1987 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
1988 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1989 break;
1990 }
1991
1992 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
1993 sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
1994 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
1995 end_off = be32_to_cpu(sdt->entry[i].end_offset);
1996
1997 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
1998 bytes_to_copy = end_off - start_off;
1999 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
2000 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
2001 continue;
2002 }
2003
2004 /* Copy data from adapter to driver buffers */
2005 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2006 bytes_to_copy);
2007
2008 ioa_dump->hdr.len += bytes_copied;
2009
2010 if (bytes_copied != bytes_to_copy) {
2011 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2012 break;
2013 }
2014 }
2015 }
2016 }
2017
2018 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
2019
2020 /* Update dump_header */
2021 driver_dump->hdr.len += ioa_dump->hdr.len;
2022 wmb();
2023 ioa_cfg->sdt_state = DUMP_OBTAINED;
2024 LEAVE;
2025}
2026
2027#else
2028#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
2029#endif
2030
2031/**
2032 * ipr_release_dump - Free adapter dump memory
2033 * @kref: kref struct
2034 *
2035 * Return value:
2036 * nothing
2037 **/
2038static void ipr_release_dump(struct kref *kref)
2039{
2040 struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
2041 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
2042 unsigned long lock_flags = 0;
2043 int i;
2044
2045 ENTER;
2046 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2047 ioa_cfg->dump = NULL;
2048 ioa_cfg->sdt_state = INACTIVE;
2049 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2050
2051 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
2052 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
2053
2054 kfree(dump);
2055 LEAVE;
2056}
2057
2058/**
2059 * ipr_worker_thread - Worker thread
2060 * @data: ioa config struct
2061 *
2062 * Called at task level from a work thread. This function takes care
 2063 * of adding and removing devices from the mid-layer as configuration
2064 * changes are detected by the adapter.
2065 *
2066 * Return value:
2067 * nothing
2068 **/
2069static void ipr_worker_thread(void *data)
2070{
2071 unsigned long lock_flags;
2072 struct ipr_resource_entry *res;
2073 struct scsi_device *sdev;
2074 struct ipr_dump *dump;
2075 struct ipr_ioa_cfg *ioa_cfg = data;
2076 u8 bus, target, lun;
2077 int did_work;
2078
2079 ENTER;
2080 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2081
2082 if (ioa_cfg->sdt_state == GET_DUMP) {
2083 dump = ioa_cfg->dump;
2084 if (!dump) {
2085 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2086 return;
2087 }
2088 kref_get(&dump->kref);
2089 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2090 ipr_get_ioa_dump(ioa_cfg, dump);
2091 kref_put(&dump->kref, ipr_release_dump);
2092
2093 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2094 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
2095 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2096 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2097 return;
2098 }
2099
2100restart:
2101 do {
2102 did_work = 0;
2103 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
2104 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2105 return;
2106 }
2107
2108 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2109 if (res->del_from_ml && res->sdev) {
2110 did_work = 1;
2111 sdev = res->sdev;
2112 if (!scsi_device_get(sdev)) {
2113 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
2114 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2115 scsi_remove_device(sdev);
2116 scsi_device_put(sdev);
2117 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2118 }
2119 break;
2120 }
2121 }
2122 } while(did_work);
2123
2124 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2125 if (res->add_to_ml) {
2126 bus = res->cfgte.res_addr.bus;
2127 target = res->cfgte.res_addr.target;
2128 lun = res->cfgte.res_addr.lun;
1121b794 2129 res->add_to_ml = 0;
2130 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2131 scsi_add_device(ioa_cfg->host, bus, target, lun);
2132 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2133 goto restart;
2134 }
2135 }
2136
2137 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
312c004d 2138 kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE);
2139 LEAVE;
2140}
2141
2142#ifdef CONFIG_SCSI_IPR_TRACE
2143/**
2144 * ipr_read_trace - Dump the adapter trace
2145 * @kobj: kobject struct
2146 * @buf: buffer
2147 * @off: offset
2148 * @count: buffer size
2149 *
2150 * Return value:
2151 * number of bytes printed to buffer
2152 **/
2153static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
2154 loff_t off, size_t count)
2155{
2156 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2157 struct Scsi_Host *shost = class_to_shost(cdev);
2158 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2159 unsigned long lock_flags = 0;
2160 int size = IPR_TRACE_SIZE;
2161 char *src = (char *)ioa_cfg->trace;
2162
2163 if (off > size)
2164 return 0;
2165 if (off + count > size) {
2166 size -= off;
2167 count = size;
2168 }
2169
2170 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2171 memcpy(buf, &src[off], count);
2172 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2173 return count;
2174}
2175
2176static struct bin_attribute ipr_trace_attr = {
2177 .attr = {
2178 .name = "trace",
2179 .mode = S_IRUGO,
2180 },
2181 .size = 0,
2182 .read = ipr_read_trace,
2183};
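
/*
 * Usage sketch (not from the source; "host0" is a hypothetical host number):
 * the read-only binary attribute returns at most IPR_TRACE_SIZE bytes, e.g.
 *
 *	# dd if=/sys/class/scsi_host/host0/trace of=ipr_trace.bin
 */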
2184#endif
2185
62275040 2186static const struct {
2187 enum ipr_cache_state state;
2188 char *name;
2189} cache_state [] = {
2190 { CACHE_NONE, "none" },
2191 { CACHE_DISABLED, "disabled" },
2192 { CACHE_ENABLED, "enabled" }
2193};
2194
2195/**
2196 * ipr_show_write_caching - Show the write caching attribute
2197 * @class_dev: class device struct
2198 * @buf: buffer
2199 *
2200 * Return value:
2201 * number of bytes printed to buffer
2202 **/
2203static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
2204{
2205 struct Scsi_Host *shost = class_to_shost(class_dev);
2206 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2207 unsigned long lock_flags = 0;
2208 int i, len = 0;
2209
2210 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2211 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2212 if (cache_state[i].state == ioa_cfg->cache_state) {
2213 len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
2214 break;
2215 }
2216 }
2217 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2218 return len;
2219}
2220
2221
2222/**
2223 * ipr_store_write_caching - Enable/disable adapter write cache
2224 * @class_dev: class_device struct
2225 * @buf: buffer
2226 * @count: buffer size
2227 *
2228 * This function will enable/disable adapter write cache.
2229 *
2230 * Return value:
2231 * count on success / other on failure
2232 **/
2233static ssize_t ipr_store_write_caching(struct class_device *class_dev,
2234 const char *buf, size_t count)
2235{
2236 struct Scsi_Host *shost = class_to_shost(class_dev);
2237 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2238 unsigned long lock_flags = 0;
2239 enum ipr_cache_state new_state = CACHE_INVALID;
2240 int i;
2241
2242 if (!capable(CAP_SYS_ADMIN))
2243 return -EACCES;
2244 if (ioa_cfg->cache_state == CACHE_NONE)
2245 return -EINVAL;
2246
2247 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2248 if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2249 new_state = cache_state[i].state;
2250 break;
2251 }
2252 }
2253
2254 if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2255 return -EINVAL;
2256
2257 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2258 if (ioa_cfg->cache_state == new_state) {
2259 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2260 return count;
2261 }
2262
2263 ioa_cfg->cache_state = new_state;
2264 dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2265 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2266 if (!ioa_cfg->in_reset_reload)
2267 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2268 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2269 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2270
2271 return count;
2272}
2273
2274static struct class_device_attribute ipr_ioa_cache_attr = {
2275 .attr = {
2276 .name = "write_cache",
2277 .mode = S_IRUGO | S_IWUSR,
2278 },
2279 .show = ipr_show_write_caching,
2280 .store = ipr_store_write_caching
2281};
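
/*
 * Usage sketch (illustrative; host number is hypothetical):
 *
 *	# cat /sys/class/scsi_host/host0/write_cache
 *	enabled
 *	# echo disabled > /sys/class/scsi_host/host0/write_cache
 *
 * Changing the state kicks off a normal-shutdown adapter reset, so the write
 * does not return until reset/reload has completed.
 */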
2282
2283/**
2284 * ipr_show_fw_version - Show the firmware version
2285 * @class_dev: class device struct
2286 * @buf: buffer
2287 *
2288 * Return value:
2289 * number of bytes printed to buffer
2290 **/
2291static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
2292{
2293 struct Scsi_Host *shost = class_to_shost(class_dev);
2294 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2295 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2296 unsigned long lock_flags = 0;
2297 int len;
2298
2299 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2300 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2301 ucode_vpd->major_release, ucode_vpd->card_type,
2302 ucode_vpd->minor_release[0],
2303 ucode_vpd->minor_release[1]);
2304 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2305 return len;
2306}
2307
2308static struct class_device_attribute ipr_fw_version_attr = {
2309 .attr = {
2310 .name = "fw_version",
2311 .mode = S_IRUGO,
2312 },
2313 .show = ipr_show_fw_version,
2314};
2315
2316/**
2317 * ipr_show_log_level - Show the adapter's error logging level
2318 * @class_dev: class device struct
2319 * @buf: buffer
2320 *
2321 * Return value:
2322 * number of bytes printed to buffer
2323 **/
2324static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
2325{
2326 struct Scsi_Host *shost = class_to_shost(class_dev);
2327 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2328 unsigned long lock_flags = 0;
2329 int len;
2330
2331 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2332 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2333 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2334 return len;
2335}
2336
2337/**
2338 * ipr_store_log_level - Change the adapter's error logging level
2339 * @class_dev: class device struct
2340 * @buf: buffer
2341 *
2342 * Return value:
 2343 * 	length of the input string
2344 **/
2345static ssize_t ipr_store_log_level(struct class_device *class_dev,
2346 const char *buf, size_t count)
2347{
2348 struct Scsi_Host *shost = class_to_shost(class_dev);
2349 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2350 unsigned long lock_flags = 0;
2351
2352 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2353 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2354 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2355 return strlen(buf);
2356}
2357
2358static struct class_device_attribute ipr_log_level_attr = {
2359 .attr = {
2360 .name = "log_level",
2361 .mode = S_IRUGO | S_IWUSR,
2362 },
2363 .show = ipr_show_log_level,
2364 .store = ipr_store_log_level
2365};
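
/*
 * Usage sketch (illustrative): the value is parsed as a decimal number, e.g.
 *
 *	# echo 4 > /sys/class/scsi_host/host0/log_level
 */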
2366
2367/**
2368 * ipr_store_diagnostics - IOA Diagnostics interface
2369 * @class_dev: class_device struct
2370 * @buf: buffer
2371 * @count: buffer size
2372 *
2373 * This function will reset the adapter and wait a reasonable
2374 * amount of time for any errors that the adapter might log.
2375 *
2376 * Return value:
2377 * count on success / other on failure
2378 **/
2379static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2380 const char *buf, size_t count)
2381{
2382 struct Scsi_Host *shost = class_to_shost(class_dev);
2383 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2384 unsigned long lock_flags = 0;
2385 int rc = count;
2386
2387 if (!capable(CAP_SYS_ADMIN))
2388 return -EACCES;
2389
2390 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2391 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2392 ioa_cfg->errors_logged = 0;
2393 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2394
2395 if (ioa_cfg->in_reset_reload) {
2396 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2397 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2398
2399 /* Wait for a second for any errors to be logged */
2400 msleep(1000);
2401 } else {
2402 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2403 return -EIO;
2404 }
2405
2406 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2407 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2408 rc = -EIO;
2409 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2410
2411 return rc;
2412}
2413
2414static struct class_device_attribute ipr_diagnostics_attr = {
2415 .attr = {
2416 .name = "run_diagnostics",
2417 .mode = S_IWUSR,
2418 },
2419 .store = ipr_store_diagnostics
2420};
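
/*
 * Usage sketch (illustrative): any write triggers the diagnostic reset; the
 * write fails with -EIO if the adapter logged errors while resetting, e.g.
 *
 *	# echo 1 > /sys/class/scsi_host/host0/run_diagnostics
 */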
2421
f37eb54b 2422/**
2423 * ipr_show_adapter_state - Show the adapter's state
2424 * @class_dev: class device struct
2425 * @buf: buffer
2426 *
2427 * Return value:
2428 * number of bytes printed to buffer
2429 **/
2430static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf)
2431{
2432 struct Scsi_Host *shost = class_to_shost(class_dev);
2433 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2434 unsigned long lock_flags = 0;
2435 int len;
2436
2437 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2438 if (ioa_cfg->ioa_is_dead)
2439 len = snprintf(buf, PAGE_SIZE, "offline\n");
2440 else
2441 len = snprintf(buf, PAGE_SIZE, "online\n");
2442 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2443 return len;
2444}
2445
2446/**
2447 * ipr_store_adapter_state - Change adapter state
2448 * @class_dev: class_device struct
2449 * @buf: buffer
2450 * @count: buffer size
2451 *
2452 * This function will change the adapter's state.
2453 *
2454 * Return value:
2455 * count on success / other on failure
2456 **/
2457static ssize_t ipr_store_adapter_state(struct class_device *class_dev,
2458 const char *buf, size_t count)
2459{
2460 struct Scsi_Host *shost = class_to_shost(class_dev);
2461 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2462 unsigned long lock_flags;
2463 int result = count;
2464
2465 if (!capable(CAP_SYS_ADMIN))
2466 return -EACCES;
2467
2468 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2469 if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
2470 ioa_cfg->ioa_is_dead = 0;
2471 ioa_cfg->reset_retries = 0;
2472 ioa_cfg->in_ioa_bringdown = 0;
2473 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2474 }
2475 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2476 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2477
2478 return result;
2479}
2480
2481static struct class_device_attribute ipr_ioa_state_attr = {
2482 .attr = {
2483 .name = "state",
2484 .mode = S_IRUGO | S_IWUSR,
2485 },
2486 .show = ipr_show_adapter_state,
2487 .store = ipr_store_adapter_state
2488};
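
/*
 * Usage sketch (illustrative): reading reports "online" or "offline"; writing
 * "online" to a dead adapter starts a fresh adapter reset, e.g.
 *
 *	# echo online > /sys/class/scsi_host/host0/state
 */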
2489
2490/**
2491 * ipr_store_reset_adapter - Reset the adapter
2492 * @class_dev: class_device struct
2493 * @buf: buffer
2494 * @count: buffer size
2495 *
2496 * This function will reset the adapter.
2497 *
2498 * Return value:
2499 * count on success / other on failure
2500 **/
2501static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2502 const char *buf, size_t count)
2503{
2504 struct Scsi_Host *shost = class_to_shost(class_dev);
2505 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2506 unsigned long lock_flags;
2507 int result = count;
2508
2509 if (!capable(CAP_SYS_ADMIN))
2510 return -EACCES;
2511
2512 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2513 if (!ioa_cfg->in_reset_reload)
2514 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2515 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2516 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2517
2518 return result;
2519}
2520
2521static struct class_device_attribute ipr_ioa_reset_attr = {
2522 .attr = {
2523 .name = "reset_host",
2524 .mode = S_IWUSR,
2525 },
2526 .store = ipr_store_reset_adapter
2527};
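
/*
 * Usage sketch (illustrative): any write requests a normal-shutdown reset and
 * blocks until reset/reload completes, e.g.
 *
 *	# echo 1 > /sys/class/scsi_host/host0/reset_host
 */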
2528
2529/**
2530 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2531 * @buf_len: buffer length
2532 *
2533 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2534 * list to use for microcode download
2535 *
2536 * Return value:
2537 * pointer to sglist / NULL on failure
2538 **/
2539static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2540{
2541 int sg_size, order, bsize_elem, num_elem, i, j;
2542 struct ipr_sglist *sglist;
2543 struct scatterlist *scatterlist;
2544 struct page *page;
2545
2546 /* Get the minimum size per scatter/gather element */
2547 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2548
2549 /* Get the actual size per element */
2550 order = get_order(sg_size);
2551
2552 /* Determine the actual number of bytes per element */
2553 bsize_elem = PAGE_SIZE * (1 << order);
2554
2555 /* Determine the actual number of sg entries needed */
2556 if (buf_len % bsize_elem)
2557 num_elem = (buf_len / bsize_elem) + 1;
2558 else
2559 num_elem = buf_len / bsize_elem;
2560
2561 /* Allocate a scatter/gather list for the DMA */
0bc42e35 2562 sglist = kzalloc(sizeof(struct ipr_sglist) +
2563 (sizeof(struct scatterlist) * (num_elem - 1)),
2564 GFP_KERNEL);
2565
2566 if (sglist == NULL) {
2567 ipr_trace;
2568 return NULL;
2569 }
2570
2571 scatterlist = sglist->scatterlist;
2572
2573 sglist->order = order;
2574 sglist->num_sg = num_elem;
2575
2576 /* Allocate a bunch of sg elements */
2577 for (i = 0; i < num_elem; i++) {
2578 page = alloc_pages(GFP_KERNEL, order);
2579 if (!page) {
2580 ipr_trace;
2581
2582 /* Free up what we already allocated */
2583 for (j = i - 1; j >= 0; j--)
2584 __free_pages(scatterlist[j].page, order);
2585 kfree(sglist);
2586 return NULL;
2587 }
2588
2589 scatterlist[i].page = page;
2590 }
2591
2592 return sglist;
2593}
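
/*
 * Worked sizing example (hypothetical numbers; IPR_MAX_SGLIST is defined
 * elsewhere): with 4 KB pages, IPR_MAX_SGLIST == 64 and buf_len == 266240:
 *
 *	sg_size    = 266240 / 63 = 4226		(minimum bytes per element)
 *	order      = get_order(4226) = 1	(8 KB chunks)
 *	bsize_elem = 4096 * (1 << 1) = 8192
 *	num_elem   = 266240 / 8192 = 32 rem 4096  ->  33 sg elements
 */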
2594
2595/**
2596 * ipr_free_ucode_buffer - Frees a microcode download buffer
 2597 * @sglist: scatter/gather list pointer
2598 *
2599 * Free a DMA'able ucode download buffer previously allocated with
2600 * ipr_alloc_ucode_buffer
2601 *
2602 * Return value:
2603 * nothing
2604 **/
2605static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2606{
2607 int i;
2608
2609 for (i = 0; i < sglist->num_sg; i++)
2610 __free_pages(sglist->scatterlist[i].page, sglist->order);
2611
2612 kfree(sglist);
2613}
2614
2615/**
2616 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2617 * @sglist: scatter/gather list pointer
2618 * @buffer: buffer pointer
2619 * @len: buffer length
2620 *
2621 * Copy a microcode image from a user buffer into a buffer allocated by
2622 * ipr_alloc_ucode_buffer
2623 *
2624 * Return value:
2625 * 0 on success / other on failure
2626 **/
2627static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2628 u8 *buffer, u32 len)
2629{
2630 int bsize_elem, i, result = 0;
2631 struct scatterlist *scatterlist;
2632 void *kaddr;
2633
2634 /* Determine the actual number of bytes per element */
2635 bsize_elem = PAGE_SIZE * (1 << sglist->order);
2636
2637 scatterlist = sglist->scatterlist;
2638
2639 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2640 kaddr = kmap(scatterlist[i].page);
2641 memcpy(kaddr, buffer, bsize_elem);
2642 kunmap(scatterlist[i].page);
2643
2644 scatterlist[i].length = bsize_elem;
2645
2646 if (result != 0) {
2647 ipr_trace;
2648 return result;
2649 }
2650 }
2651
2652 if (len % bsize_elem) {
2653 kaddr = kmap(scatterlist[i].page);
2654 memcpy(kaddr, buffer, len % bsize_elem);
2655 kunmap(scatterlist[i].page);
2656
2657 scatterlist[i].length = len % bsize_elem;
2658 }
2659
2660 sglist->buffer_len = len;
2661 return result;
2662}
2663
2664/**
12baa420 2665 * ipr_build_ucode_ioadl - Build a microcode download IOADL
2666 * @ipr_cmd: ipr command struct
2667 * @sglist: scatter/gather list
1da177e4 2668 *
12baa420 2669 * Builds a microcode download IOA data list (IOADL).
1da177e4 2670 *
1da177e4 2671 **/
12baa420 2672static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
2673 struct ipr_sglist *sglist)
1da177e4 2674{
2675 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2676 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2677 struct scatterlist *scatterlist = sglist->scatterlist;
2678 int i;
2679
12baa420 2680 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
1da177e4 2681 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
12baa420 2682 ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
2683 ioarcb->write_ioadl_len =
2684 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2685
2686 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2687 ioadl[i].flags_and_data_len =
2688 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2689 ioadl[i].address =
2690 cpu_to_be32(sg_dma_address(&scatterlist[i]));
2691 }
2692
12baa420 2693 ioadl[i-1].flags_and_data_len |=
2694 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
2695}
2696
2697/**
2698 * ipr_update_ioa_ucode - Update IOA's microcode
2699 * @ioa_cfg: ioa config struct
2700 * @sglist: scatter/gather list
2701 *
2702 * Initiate an adapter reset to update the IOA's microcode
2703 *
2704 * Return value:
2705 * 0 on success / -EIO on failure
2706 **/
2707static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
2708 struct ipr_sglist *sglist)
2709{
2710 unsigned long lock_flags;
2711
2712 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2713
2714 if (ioa_cfg->ucode_sglist) {
2715 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2716 dev_err(&ioa_cfg->pdev->dev,
2717 "Microcode download already in progress\n");
2718 return -EIO;
1da177e4 2719 }
12baa420 2720
2721 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
2722 sglist->num_sg, DMA_TO_DEVICE);
2723
2724 if (!sglist->num_dma_sg) {
2725 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2726 dev_err(&ioa_cfg->pdev->dev,
2727 "Failed to map microcode download buffer!\n");
2728 return -EIO;
2729 }
2730
12baa420 2731 ioa_cfg->ucode_sglist = sglist;
2732 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2733 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2734 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2735
2736 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2737 ioa_cfg->ucode_sglist = NULL;
2738 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2739 return 0;
2740}
2741
2742/**
2743 * ipr_store_update_fw - Update the firmware on the adapter
2744 * @class_dev: class_device struct
2745 * @buf: buffer
2746 * @count: buffer size
2747 *
2748 * This function will update the firmware on the adapter.
2749 *
2750 * Return value:
2751 * count on success / other on failure
2752 **/
2753static ssize_t ipr_store_update_fw(struct class_device *class_dev,
2754 const char *buf, size_t count)
2755{
2756 struct Scsi_Host *shost = class_to_shost(class_dev);
2757 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2758 struct ipr_ucode_image_header *image_hdr;
2759 const struct firmware *fw_entry;
2760 struct ipr_sglist *sglist;
2761 char fname[100];
2762 char *src;
2763 int len, result, dnld_size;
2764
2765 if (!capable(CAP_SYS_ADMIN))
2766 return -EACCES;
2767
2768 len = snprintf(fname, 99, "%s", buf);
2769 fname[len-1] = '\0';
2770
2771 if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
2772 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
2773 return -EIO;
2774 }
2775
2776 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
2777
2778 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
2779 (ioa_cfg->vpd_cbs->page3_data.card_type &&
2780 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
2781 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
2782 release_firmware(fw_entry);
2783 return -EINVAL;
2784 }
2785
2786 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
2787 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
2788 sglist = ipr_alloc_ucode_buffer(dnld_size);
2789
2790 if (!sglist) {
2791 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
2792 release_firmware(fw_entry);
2793 return -ENOMEM;
2794 }
2795
2796 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
2797
2798 if (result) {
2799 dev_err(&ioa_cfg->pdev->dev,
2800 "Microcode buffer copy to DMA buffer failed\n");
12baa420 2801 goto out;
2802 }
2803
12baa420 2804 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
1da177e4 2805
12baa420 2806 if (!result)
2807 result = count;
2808out:
2809 ipr_free_ucode_buffer(sglist);
2810 release_firmware(fw_entry);
12baa420 2811 return result;
2812}
2813
2814static struct class_device_attribute ipr_update_fw_attr = {
2815 .attr = {
2816 .name = "update_fw",
2817 .mode = S_IWUSR,
2818 },
2819 .store = ipr_store_update_fw
2820};
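
/*
 * Usage sketch (illustrative file name): the written string is handed to
 * request_firmware(), which typically resolves it under /lib/firmware, e.g.
 *
 *	# echo ibm-ipr-ucode.img > /sys/class/scsi_host/host0/update_fw
 *
 * The write blocks while the image is copied to the DMA buffer and the
 * adapter resets to activate the new microcode.
 */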
2821
2822static struct class_device_attribute *ipr_ioa_attrs[] = {
2823 &ipr_fw_version_attr,
2824 &ipr_log_level_attr,
2825 &ipr_diagnostics_attr,
f37eb54b 2826 &ipr_ioa_state_attr,
2827 &ipr_ioa_reset_attr,
2828 &ipr_update_fw_attr,
62275040 2829 &ipr_ioa_cache_attr,
2830 NULL,
2831};
2832
2833#ifdef CONFIG_SCSI_IPR_DUMP
2834/**
2835 * ipr_read_dump - Dump the adapter
2836 * @kobj: kobject struct
2837 * @buf: buffer
2838 * @off: offset
2839 * @count: buffer size
2840 *
2841 * Return value:
2842 * number of bytes printed to buffer
2843 **/
2844static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
2845 loff_t off, size_t count)
2846{
2847 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2848 struct Scsi_Host *shost = class_to_shost(cdev);
2849 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2850 struct ipr_dump *dump;
2851 unsigned long lock_flags = 0;
2852 char *src;
2853 int len;
2854 size_t rc = count;
2855
2856 if (!capable(CAP_SYS_ADMIN))
2857 return -EACCES;
2858
2859 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2860 dump = ioa_cfg->dump;
2861
2862 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
2863 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2864 return 0;
2865 }
2866 kref_get(&dump->kref);
2867 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2868
2869 if (off > dump->driver_dump.hdr.len) {
2870 kref_put(&dump->kref, ipr_release_dump);
2871 return 0;
2872 }
2873
2874 if (off + count > dump->driver_dump.hdr.len) {
2875 count = dump->driver_dump.hdr.len - off;
2876 rc = count;
2877 }
2878
2879 if (count && off < sizeof(dump->driver_dump)) {
2880 if (off + count > sizeof(dump->driver_dump))
2881 len = sizeof(dump->driver_dump) - off;
2882 else
2883 len = count;
2884 src = (u8 *)&dump->driver_dump + off;
2885 memcpy(buf, src, len);
2886 buf += len;
2887 off += len;
2888 count -= len;
2889 }
2890
2891 off -= sizeof(dump->driver_dump);
2892
2893 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
2894 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
2895 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
2896 else
2897 len = count;
2898 src = (u8 *)&dump->ioa_dump + off;
2899 memcpy(buf, src, len);
2900 buf += len;
2901 off += len;
2902 count -= len;
2903 }
2904
2905 off -= offsetof(struct ipr_ioa_dump, ioa_data);
2906
2907 while (count) {
2908 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
2909 len = PAGE_ALIGN(off) - off;
2910 else
2911 len = count;
2912 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
2913 src += off & ~PAGE_MASK;
2914 memcpy(buf, src, len);
2915 buf += len;
2916 off += len;
2917 count -= len;
2918 }
2919
2920 kref_put(&dump->kref, ipr_release_dump);
2921 return rc;
2922}
2923
2924/**
2925 * ipr_alloc_dump - Prepare for adapter dump
2926 * @ioa_cfg: ioa config struct
2927 *
2928 * Return value:
2929 * 0 on success / other on failure
2930 **/
2931static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
2932{
2933 struct ipr_dump *dump;
2934 unsigned long lock_flags = 0;
2935
2936 ENTER;
0bc42e35 2937 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
2938
2939 if (!dump) {
2940 ipr_err("Dump memory allocation failed\n");
2941 return -ENOMEM;
2942 }
2943
2944 kref_init(&dump->kref);
2945 dump->ioa_cfg = ioa_cfg;
2946
2947 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2948
2949 if (INACTIVE != ioa_cfg->sdt_state) {
2950 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2951 kfree(dump);
2952 return 0;
2953 }
2954
2955 ioa_cfg->dump = dump;
2956 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
2957 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
2958 ioa_cfg->dump_taken = 1;
2959 schedule_work(&ioa_cfg->work_q);
2960 }
2961 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2962
2963 LEAVE;
2964 return 0;
2965}
2966
2967/**
2968 * ipr_free_dump - Free adapter dump memory
2969 * @ioa_cfg: ioa config struct
2970 *
2971 * Return value:
2972 * 0 on success / other on failure
2973 **/
2974static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
2975{
2976 struct ipr_dump *dump;
2977 unsigned long lock_flags = 0;
2978
2979 ENTER;
2980
2981 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2982 dump = ioa_cfg->dump;
2983 if (!dump) {
2984 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2985 return 0;
2986 }
2987
2988 ioa_cfg->dump = NULL;
2989 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2990
2991 kref_put(&dump->kref, ipr_release_dump);
2992
2993 LEAVE;
2994 return 0;
2995}
2996
2997/**
2998 * ipr_write_dump - Setup dump state of adapter
2999 * @kobj: kobject struct
3000 * @buf: buffer
3001 * @off: offset
3002 * @count: buffer size
3003 *
3004 * Return value:
 3005 * 	count on success / other on failure
3006 **/
3007static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
3008 loff_t off, size_t count)
3009{
3010 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
3011 struct Scsi_Host *shost = class_to_shost(cdev);
3012 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3013 int rc;
3014
3015 if (!capable(CAP_SYS_ADMIN))
3016 return -EACCES;
3017
3018 if (buf[0] == '1')
3019 rc = ipr_alloc_dump(ioa_cfg);
3020 else if (buf[0] == '0')
3021 rc = ipr_free_dump(ioa_cfg);
3022 else
3023 return -EINVAL;
3024
3025 if (rc)
3026 return rc;
3027 else
3028 return count;
3029}
3030
3031static struct bin_attribute ipr_dump_attr = {
3032 .attr = {
3033 .name = "dump",
3034 .mode = S_IRUSR | S_IWUSR,
3035 },
3036 .size = 0,
3037 .read = ipr_read_dump,
3038 .write = ipr_write_dump
3039};
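
/*
 * Usage sketch (illustrative): writing '1' prepares dump memory, the binary
 * attribute is then read to retrieve the dump, and writing '0' frees it, e.g.
 *
 *	# echo 1 > /sys/class/scsi_host/host0/dump
 *	# dd if=/sys/class/scsi_host/host0/dump of=ipr_dump.bin
 *	# echo 0 > /sys/class/scsi_host/host0/dump
 */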
3040#else
3041static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
3042#endif
3043
3044/**
3045 * ipr_change_queue_depth - Change the device's queue depth
3046 * @sdev: scsi device struct
3047 * @qdepth: depth to set
3048 *
3049 * Return value:
3050 * actual depth set
3051 **/
3052static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
3053{
3054 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
3055 return sdev->queue_depth;
3056}
3057
3058/**
3059 * ipr_change_queue_type - Change the device's queue type
 3060 * @sdev: scsi device struct
3061 * @tag_type: type of tags to use
3062 *
3063 * Return value:
3064 * actual queue type set
3065 **/
3066static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
3067{
3068 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3069 struct ipr_resource_entry *res;
3070 unsigned long lock_flags = 0;
3071
3072 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3073 res = (struct ipr_resource_entry *)sdev->hostdata;
3074
3075 if (res) {
3076 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
3077 /*
3078 * We don't bother quiescing the device here since the
3079 * adapter firmware does it for us.
3080 */
3081 scsi_set_tag_type(sdev, tag_type);
3082
3083 if (tag_type)
3084 scsi_activate_tcq(sdev, sdev->queue_depth);
3085 else
3086 scsi_deactivate_tcq(sdev, sdev->queue_depth);
3087 } else
3088 tag_type = 0;
3089 } else
3090 tag_type = 0;
3091
3092 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3093 return tag_type;
3094}
3095
3096/**
3097 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
3098 * @dev: device struct
3099 * @buf: buffer
3100 *
3101 * Return value:
3102 * number of bytes printed to buffer
3103 **/
10523b3b 3104static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
3105{
3106 struct scsi_device *sdev = to_scsi_device(dev);
3107 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3108 struct ipr_resource_entry *res;
3109 unsigned long lock_flags = 0;
3110 ssize_t len = -ENXIO;
3111
3112 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3113 res = (struct ipr_resource_entry *)sdev->hostdata;
3114 if (res)
3115 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
3116 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3117 return len;
3118}
3119
3120static struct device_attribute ipr_adapter_handle_attr = {
3121 .attr = {
3122 .name = "adapter_handle",
3123 .mode = S_IRUSR,
3124 },
3125 .show = ipr_show_adapter_handle
3126};
3127
3128static struct device_attribute *ipr_dev_attrs[] = {
3129 &ipr_adapter_handle_attr,
3130 NULL,
3131};
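
/*
 * Usage sketch (illustrative; the exact path depends on where the SCSI
 * midlayer exposes per-device attributes, shown here for a device at
 * 0:0:1:0 with a hypothetical handle value):
 *
 *	# cat /sys/class/scsi_device/0:0:1:0/device/adapter_handle
 *	00000002
 */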
3132
3133/**
3134 * ipr_biosparam - Return the HSC mapping
3135 * @sdev: scsi device struct
3136 * @block_device: block device pointer
3137 * @capacity: capacity of the device
3138 * @parm: Array containing returned HSC values.
3139 *
3140 * This function generates the HSC parms that fdisk uses.
3141 * We want to make sure we return something that places partitions
3142 * on 4k boundaries for best performance with the IOA.
3143 *
3144 * Return value:
3145 * 0 on success
3146 **/
3147static int ipr_biosparam(struct scsi_device *sdev,
3148 struct block_device *block_device,
3149 sector_t capacity, int *parm)
3150{
3151 int heads, sectors;
3152 sector_t cylinders;
3153
3154 heads = 128;
3155 sectors = 32;
3156
3157 cylinders = capacity;
3158 sector_div(cylinders, (128 * 32));
3159
3160 /* return result */
3161 parm[0] = heads;
3162 parm[1] = sectors;
3163 parm[2] = cylinders;
3164
3165 return 0;
3166}
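
/*
 * Worked example (hypothetical capacity): a 41943040-sector (20 GB) disk
 * reports heads = 128, sectors = 32 and
 *
 *	cylinders = 41943040 / (128 * 32) = 10240
 *
 * so each cylinder spans 4096 sectors (2 MB), which keeps fdisk's default
 * cylinder-aligned partition boundaries on 4 KB multiples for the IOA.
 */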
3167
3168/**
3169 * ipr_slave_destroy - Unconfigure a SCSI device
3170 * @sdev: scsi device struct
3171 *
3172 * Return value:
3173 * nothing
3174 **/
3175static void ipr_slave_destroy(struct scsi_device *sdev)
3176{
3177 struct ipr_resource_entry *res;
3178 struct ipr_ioa_cfg *ioa_cfg;
3179 unsigned long lock_flags = 0;
3180
3181 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3182
3183 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3184 res = (struct ipr_resource_entry *) sdev->hostdata;
3185 if (res) {
3186 sdev->hostdata = NULL;
3187 res->sdev = NULL;
3188 }
3189 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3190}
3191
3192/**
3193 * ipr_slave_configure - Configure a SCSI device
3194 * @sdev: scsi device struct
3195 *
3196 * This function configures the specified scsi device.
3197 *
3198 * Return value:
3199 * 0 on success
3200 **/
3201static int ipr_slave_configure(struct scsi_device *sdev)
3202{
3203 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3204 struct ipr_resource_entry *res;
3205 unsigned long lock_flags = 0;
3206
3207 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3208 res = sdev->hostdata;
3209 if (res) {
3210 if (ipr_is_af_dasd_device(res))
3211 sdev->type = TYPE_RAID;
0726ce26 3212 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
1da177e4 3213 sdev->scsi_level = 4;
0726ce26 3214 sdev->no_uld_attach = 1;
3215 }
3216 if (ipr_is_vset_device(res)) {
3217 sdev->timeout = IPR_VSET_RW_TIMEOUT;
3218 blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
3219 }
e4fbf44e 3220 if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
3221 sdev->allow_restart = 1;
3222 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
3223 }
3224 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3225 return 0;
3226}
3227
3228/**
3229 * ipr_slave_alloc - Prepare for commands to a device.
3230 * @sdev: scsi device struct
3231 *
3232 * This function saves a pointer to the resource entry
3233 * in the scsi device struct if the device exists. We
3234 * can then use this pointer in ipr_queuecommand when
3235 * handling new commands.
3236 *
3237 * Return value:
692aebfc 3238 * 0 on success / -ENXIO if device does not exist
3239 **/
3240static int ipr_slave_alloc(struct scsi_device *sdev)
3241{
3242 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3243 struct ipr_resource_entry *res;
3244 unsigned long lock_flags;
692aebfc 3245 int rc = -ENXIO;
3246
3247 sdev->hostdata = NULL;
3248
3249 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3250
3251 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3252 if ((res->cfgte.res_addr.bus == sdev->channel) &&
3253 (res->cfgte.res_addr.target == sdev->id) &&
3254 (res->cfgte.res_addr.lun == sdev->lun)) {
3255 res->sdev = sdev;
3256 res->add_to_ml = 0;
3257 res->in_erp = 0;
3258 sdev->hostdata = res;
ee0a90fa 3259 if (!ipr_is_naca_model(res))
3260 res->needs_sync_complete = 1;
692aebfc 3261 rc = 0;
3262 break;
3263 }
3264 }
3265
3266 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3267
692aebfc 3268 return rc;
3269}
3270
3271/**
3272 * ipr_eh_host_reset - Reset the host adapter
3273 * @scsi_cmd: scsi command struct
3274 *
3275 * Return value:
3276 * SUCCESS / FAILED
3277 **/
df0ae249 3278static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
3279{
3280 struct ipr_ioa_cfg *ioa_cfg;
3281 int rc;
3282
3283 ENTER;
3284 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3285
3286 dev_err(&ioa_cfg->pdev->dev,
3287 "Adapter being reset as a result of error recovery.\n");
3288
3289 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3290 ioa_cfg->sdt_state = GET_DUMP;
3291
3292 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
3293
3294 LEAVE;
3295 return rc;
3296}
3297
3298static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
3299{
3300 int rc;
3301
3302 spin_lock_irq(cmd->device->host->host_lock);
3303 rc = __ipr_eh_host_reset(cmd);
3304 spin_unlock_irq(cmd->device->host->host_lock);
3305
3306 return rc;
3307}
3308
3309/**
3310 * ipr_device_reset - Reset the device
3311 * @ioa_cfg: ioa config struct
3312 * @res: resource entry struct
3313 *
3314 * This function issues a device reset to the affected device.
3315 * If the device is a SCSI device, a LUN reset will be sent
3316 * to the device first. If that does not work, a target reset
3317 * will be sent.
3318 *
3319 * Return value:
3320 * 0 on success / non-zero on failure
3321 **/
3322static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
3323 struct ipr_resource_entry *res)
3324{
3325 struct ipr_cmnd *ipr_cmd;
3326 struct ipr_ioarcb *ioarcb;
3327 struct ipr_cmd_pkt *cmd_pkt;
3328 u32 ioasc;
3329
3330 ENTER;
3331 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3332 ioarcb = &ipr_cmd->ioarcb;
3333 cmd_pkt = &ioarcb->cmd_pkt;
3334
3335 ioarcb->res_handle = res->cfgte.res_handle;
3336 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3337 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3338
3339 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3340 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3341 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3342
3343 LEAVE;
3344 return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
3345}
3346
3347/**
3348 * ipr_eh_dev_reset - Reset the device
3349 * @scsi_cmd: scsi command struct
3350 *
3351 * This function issues a device reset to the affected device.
3352 * A LUN reset will be sent to the device first. If that does
3353 * not work, a target reset will be sent.
3354 *
3355 * Return value:
3356 * SUCCESS / FAILED
3357 **/
94d0e7b8 3358static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3359{
3360 struct ipr_cmnd *ipr_cmd;
3361 struct ipr_ioa_cfg *ioa_cfg;
3362 struct ipr_resource_entry *res;
c6513096 3363 int rc;
3364
3365 ENTER;
3366 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3367 res = scsi_cmd->device->hostdata;
3368
eeb88307 3369 if (!res)
3370 return FAILED;
3371
3372 /*
3373 * If we are currently going through reset/reload, return failed. This will force the
3374 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
3375 * reset to complete
3376 */
3377 if (ioa_cfg->in_reset_reload)
3378 return FAILED;
3379 if (ioa_cfg->ioa_is_dead)
3380 return FAILED;
3381
3382 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3383 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3384 if (ipr_cmd->scsi_cmd)
3385 ipr_cmd->done = ipr_scsi_eh_done;
3386 }
3387 }
3388
3389 res->resetting_device = 1;
fb3ed3cb 3390 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
c6513096 3391 rc = ipr_device_reset(ioa_cfg, res);
3392 res->resetting_device = 0;
3393
1da177e4 3394 LEAVE;
c6513096 3395 return (rc ? FAILED : SUCCESS);
3396}
3397
3398static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
3399{
3400 int rc;
3401
3402 spin_lock_irq(cmd->device->host->host_lock);
3403 rc = __ipr_eh_dev_reset(cmd);
3404 spin_unlock_irq(cmd->device->host->host_lock);
3405
3406 return rc;
3407}
3408
3409/**
3410 * ipr_bus_reset_done - Op done function for bus reset.
3411 * @ipr_cmd: ipr command struct
3412 *
3413 * This function is the op done function for a bus reset
3414 *
3415 * Return value:
3416 * none
3417 **/
3418static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3419{
3420 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3421 struct ipr_resource_entry *res;
3422
3423 ENTER;
3424 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3425 if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3426 sizeof(res->cfgte.res_handle))) {
3427 scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3428 break;
3429 }
3430 }
3431
3432 /*
3433 * If abort has not completed, indicate the reset has, else call the
3434 * abort's done function to wake the sleeping eh thread
3435 */
3436 if (ipr_cmd->sibling->sibling)
3437 ipr_cmd->sibling->sibling = NULL;
3438 else
3439 ipr_cmd->sibling->done(ipr_cmd->sibling);
3440
3441 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3442 LEAVE;
3443}
3444
3445/**
3446 * ipr_abort_timeout - An abort task has timed out
3447 * @ipr_cmd: ipr command struct
3448 *
3449 * This function handles when an abort task times out. If this
3450 * happens we issue a bus reset since we have resources tied
3451 * up that must be freed before returning to the midlayer.
3452 *
3453 * Return value:
3454 * none
3455 **/
3456static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3457{
3458 struct ipr_cmnd *reset_cmd;
3459 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3460 struct ipr_cmd_pkt *cmd_pkt;
3461 unsigned long lock_flags = 0;
3462
3463 ENTER;
3464 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3465 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
3466 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3467 return;
3468 }
3469
fb3ed3cb 3470 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
3471 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3472 ipr_cmd->sibling = reset_cmd;
3473 reset_cmd->sibling = ipr_cmd;
3474 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
3475 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
3476 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3477 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3478 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
3479
3480 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3481 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3482 LEAVE;
3483}
3484
3485/**
3486 * ipr_cancel_op - Cancel specified op
3487 * @scsi_cmd: scsi command struct
3488 *
3489 * This function cancels specified op.
3490 *
3491 * Return value:
3492 * SUCCESS / FAILED
3493 **/
3494static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
3495{
3496 struct ipr_cmnd *ipr_cmd;
3497 struct ipr_ioa_cfg *ioa_cfg;
3498 struct ipr_resource_entry *res;
3499 struct ipr_cmd_pkt *cmd_pkt;
3500 u32 ioasc;
3501 int op_found = 0;
3502
3503 ENTER;
3504 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3505 res = scsi_cmd->device->hostdata;
3506
3507 /* If we are currently going through reset/reload, return failed.
3508 * This will force the mid-layer to call ipr_eh_host_reset,
3509 * which will then go to sleep and wait for the reset to complete
3510 */
3511 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
3512 return FAILED;
3513 if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
3514 return FAILED;
3515
3516 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3517 if (ipr_cmd->scsi_cmd == scsi_cmd) {
3518 ipr_cmd->done = ipr_scsi_eh_done;
3519 op_found = 1;
3520 break;
3521 }
3522 }
3523
3524 if (!op_found)
3525 return SUCCESS;
3526
3527 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3528 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3529 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3530 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3531 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3532 ipr_cmd->u.sdev = scsi_cmd->device;
3533
3534 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
3535 scsi_cmd->cmnd[0]);
3536 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
3537 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3538
3539 /*
3540 * If the abort task timed out and we sent a bus reset, we will get
3541 * one the following responses to the abort
3542 */
3543 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
3544 ioasc = 0;
3545 ipr_trace;
3546 }
3547
3548 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
ee0a90fa 3549 if (!ipr_is_naca_model(res))
3550 res->needs_sync_complete = 1;
3551
3552 LEAVE;
3553 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3554}
3555
3556/**
3557 * ipr_eh_abort - Abort a single op
3558 * @scsi_cmd: scsi command struct
3559 *
3560 * Return value:
3561 * SUCCESS / FAILED
3562 **/
3563static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
3564{
3565 unsigned long flags;
3566 int rc;
3567
3568 ENTER;
1da177e4 3569
3570 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
3571 rc = ipr_cancel_op(scsi_cmd);
3572 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
3573
3574 LEAVE;
8fa728a2 3575 return rc;
3576}
3577
3578/**
3579 * ipr_handle_other_interrupt - Handle "other" interrupts
3580 * @ioa_cfg: ioa config struct
3581 * @int_reg: interrupt register
3582 *
3583 * Return value:
3584 * IRQ_NONE / IRQ_HANDLED
3585 **/
3586static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
3587 volatile u32 int_reg)
3588{
3589 irqreturn_t rc = IRQ_HANDLED;
3590
3591 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
3592 /* Mask the interrupt */
3593 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
3594
3595 /* Clear the interrupt */
3596 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
3597 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
3598
3599 list_del(&ioa_cfg->reset_cmd->queue);
3600 del_timer(&ioa_cfg->reset_cmd->timer);
3601 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
3602 } else {
3603 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
3604 ioa_cfg->ioa_unit_checked = 1;
3605 else
3606 dev_err(&ioa_cfg->pdev->dev,
3607 "Permanent IOA failure. 0x%08X\n", int_reg);
3608
3609 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3610 ioa_cfg->sdt_state = GET_DUMP;
3611
3612 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
3613 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3614 }
3615
3616 return rc;
3617}
3618
3619/**
3620 * ipr_isr - Interrupt service routine
3621 * @irq: irq number
3622 * @devp: pointer to ioa config struct
3623 * @regs: pt_regs struct
3624 *
3625 * Return value:
3626 * IRQ_NONE / IRQ_HANDLED
3627 **/
3628static irqreturn_t ipr_isr(int irq, void *devp, struct pt_regs *regs)
3629{
3630 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
3631 unsigned long lock_flags = 0;
3632 volatile u32 int_reg, int_mask_reg;
3633 u32 ioasc;
3634 u16 cmd_index;
3635 struct ipr_cmnd *ipr_cmd;
3636 irqreturn_t rc = IRQ_NONE;
3637
3638 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3639
3640 /* If interrupts are disabled, ignore the interrupt */
3641 if (!ioa_cfg->allow_interrupts) {
3642 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3643 return IRQ_NONE;
3644 }
3645
3646 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
3647 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3648
3649 /* If an interrupt on the adapter did not occur, ignore it */
3650 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
3651 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3652 return IRQ_NONE;
3653 }
3654
3655 while (1) {
3656 ipr_cmd = NULL;
3657
3658 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
3659 ioa_cfg->toggle_bit) {
3660
3661 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
3662 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
3663
3664 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
3665 ioa_cfg->errors_logged++;
3666 dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
3667
3668 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3669 ioa_cfg->sdt_state = GET_DUMP;
3670
3671 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3672 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3673 return IRQ_HANDLED;
3674 }
3675
3676 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
3677
3678 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3679
3680 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
3681
3682 list_del(&ipr_cmd->queue);
3683 del_timer(&ipr_cmd->timer);
3684 ipr_cmd->done(ipr_cmd);
3685
3686 rc = IRQ_HANDLED;
3687
3688 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
3689 ioa_cfg->hrrq_curr++;
3690 } else {
3691 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
3692 ioa_cfg->toggle_bit ^= 1u;
3693 }
3694 }
3695
3696 if (ipr_cmd != NULL) {
3697 /* Clear the PCI interrupt */
3698 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
3699 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3700 } else
3701 break;
3702 }
3703
3704 if (unlikely(rc == IRQ_NONE))
3705 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
3706
3707 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3708 return rc;
3709}
3710
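/*
 * Illustrative sketch only: consuming a response ring guarded by a
 * toggle bit, the same pattern the ISR above uses for the host RRQ.
 * The structure, constants and callback here are made-up stand-ins.
 */
#include <stdint.h>

#define TOGGLE_BIT	0x1u
#define HANDLE_SHIFT	2

/* Hypothetical consumer state for a response ring like the host RRQ:
 * the producer writes (handle << HANDLE_SHIFT) | toggle into each slot
 * and flips the toggle value it writes each time it wraps. */
struct ring_consumer {
	uint32_t *curr, *start, *end;	/* end points at the last slot */
	uint32_t toggle;		/* value that marks "new" entries */
};

/* Consume every entry whose toggle matches ours; wrap and flip the
 * expected toggle at the end of the ring, exactly as the ISR above does. */
static void drain_ring(struct ring_consumer *rc, void (*done)(uint32_t handle))
{
	while ((*rc->curr & TOGGLE_BIT) == rc->toggle) {
		done(*rc->curr >> HANDLE_SHIFT);

		if (rc->curr < rc->end)
			rc->curr++;
		else {
			rc->curr = rc->start;
			rc->toggle ^= 1u;
		}
	}
}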
3711/**
3712 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
3713 * @ioa_cfg: ioa config struct
3714 * @ipr_cmd: ipr command struct
3715 *
3716 * Return value:
3717 * 0 on success / -1 on failure
3718 **/
3719static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
3720 struct ipr_cmnd *ipr_cmd)
3721{
3722 int i;
3723 struct scatterlist *sglist;
3724 u32 length;
3725 u32 ioadl_flags = 0;
3726 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3727 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3728 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
3729
3730 length = scsi_cmd->request_bufflen;
3731
3732 if (length == 0)
3733 return 0;
3734
3735 if (scsi_cmd->use_sg) {
3736 ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
3737 scsi_cmd->request_buffer,
3738 scsi_cmd->use_sg,
3739 scsi_cmd->sc_data_direction);
3740
3741 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3742 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3743 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3744 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3745 ioarcb->write_ioadl_len =
3746 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3747 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3748 ioadl_flags = IPR_IOADL_FLAGS_READ;
3749 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3750 ioarcb->read_ioadl_len =
3751 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3752 }
3753
3754 sglist = scsi_cmd->request_buffer;
3755
3756 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3757 ioadl[i].flags_and_data_len =
3758 cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
3759 ioadl[i].address =
3760 cpu_to_be32(sg_dma_address(&sglist[i]));
3761 }
3762
3763 if (likely(ipr_cmd->dma_use_sg)) {
3764 ioadl[i-1].flags_and_data_len |=
3765 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3766 return 0;
3767 } else
3768 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
3769 } else {
3770 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3771 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3772 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3773 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3774 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3775 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3776 ioadl_flags = IPR_IOADL_FLAGS_READ;
3777 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3778 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3779 }
3780
3781 ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
3782 scsi_cmd->request_buffer, length,
3783 scsi_cmd->sc_data_direction);
3784
3785 if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
3786 ipr_cmd->dma_use_sg = 1;
3787 ioadl[0].flags_and_data_len =
3788 cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
3789 ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
3790 return 0;
3791 } else
3792 dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
3793 }
3794
3795 return -1;
3796}
3797
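/*
 * Illustrative sketch only: turning a list of mapped DMA segments into
 * a descriptor list with a LAST flag on the final entry, as the
 * scatter/gather path above does. Types and flag values are made up.
 */
#include <stdint.h>

#define DESC_FLAGS_LAST	0x80000000u

/* Hypothetical DMA descriptor and segment types standing in for the
 * real IOADL entries and scatterlist segments. */
struct dma_seg  { uint32_t addr, len; };
struct dma_desc { uint32_t flags_and_len, addr; };

/* Build one descriptor per mapped segment and tag the final entry with
 * a LAST flag, mirroring ipr_build_ioadl(); returns the count or 0 if
 * there was nothing to map. */
static int build_desc_list(struct dma_desc *out, const struct dma_seg *segs,
			   int nsegs, uint32_t dir_flags)
{
	int i;

	if (nsegs <= 0)
		return 0;

	for (i = 0; i < nsegs; i++) {
		out[i].flags_and_len = dir_flags | segs[i].len;
		out[i].addr = segs[i].addr;
	}
	out[nsegs - 1].flags_and_len |= DESC_FLAGS_LAST;
	return nsegs;
}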
3798/**
3799 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
3800 * @scsi_cmd: scsi command struct
3801 *
3802 * Return value:
3803 * task attributes
3804 **/
3805static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
3806{
3807 u8 tag[2];
3808 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
3809
3810 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
3811 switch (tag[0]) {
3812 case MSG_SIMPLE_TAG:
3813 rc = IPR_FLAGS_LO_SIMPLE_TASK;
3814 break;
3815 case MSG_HEAD_TAG:
3816 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
3817 break;
3818 case MSG_ORDERED_TAG:
3819 rc = IPR_FLAGS_LO_ORDERED_TASK;
3820 break;
3821 };
3822 }
3823
3824 return rc;
3825}
3826
3827/**
3828 * ipr_erp_done - Process completion of ERP for a device
3829 * @ipr_cmd: ipr command struct
3830 *
3831 * This function copies the sense buffer into the scsi_cmd
3832 * struct and calls the scsi_done completion function.
3833 *
3834 * Return value:
3835 * nothing
3836 **/
3837static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
3838{
3839 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3840 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3841 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3842 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3843
3844 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3845 scsi_cmd->result |= (DID_ERROR << 16);
3846 scmd_printk(KERN_ERR, scsi_cmd,
3847 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
3848 } else {
3849 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
3850 SCSI_SENSE_BUFFERSIZE);
3851 }
3852
3853 if (res) {
ee0a90fa 3854 if (!ipr_is_naca_model(res))
3855 res->needs_sync_complete = 1;
3856 res->in_erp = 0;
3857 }
3858 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3859 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3860 scsi_cmd->scsi_done(scsi_cmd);
3861}
3862
3863/**
3864 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
3865 * @ipr_cmd: ipr command struct
3866 *
3867 * Return value:
3868 * none
3869 **/
3870static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
3871{
3872 struct ipr_ioarcb *ioarcb;
3873 struct ipr_ioasa *ioasa;
3874
3875 ioarcb = &ipr_cmd->ioarcb;
3876 ioasa = &ipr_cmd->ioasa;
3877
3878 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
3879 ioarcb->write_data_transfer_length = 0;
3880 ioarcb->read_data_transfer_length = 0;
3881 ioarcb->write_ioadl_len = 0;
3882 ioarcb->read_ioadl_len = 0;
3883 ioasa->ioasc = 0;
3884 ioasa->residual_data_len = 0;
3885}
3886
3887/**
3888 * ipr_erp_request_sense - Send request sense to a device
3889 * @ipr_cmd: ipr command struct
3890 *
3891 * This function sends a request sense to a device as a result
3892 * of a check condition.
3893 *
3894 * Return value:
3895 * nothing
3896 **/
3897static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
3898{
3899 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3900 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3901
3902 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3903 ipr_erp_done(ipr_cmd);
3904 return;
3905 }
3906
3907 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3908
3909 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
3910 cmd_pkt->cdb[0] = REQUEST_SENSE;
3911 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
3912 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
3913 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3914 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
3915
3916 ipr_cmd->ioadl[0].flags_and_data_len =
3917 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
3918 ipr_cmd->ioadl[0].address =
3919 cpu_to_be32(ipr_cmd->sense_buffer_dma);
3920
3921 ipr_cmd->ioarcb.read_ioadl_len =
3922 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3923 ipr_cmd->ioarcb.read_data_transfer_length =
3924 cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
3925
3926 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
3927 IPR_REQUEST_SENSE_TIMEOUT * 2);
3928}
3929
3930/**
3931 * ipr_erp_cancel_all - Send cancel all to a device
3932 * @ipr_cmd: ipr command struct
3933 *
3934 * This function sends a cancel all to a device to clear the
3935 * queue. If we are running TCQ on the device, QERR is set to 1,
3936 * which means all outstanding ops have been dropped on the floor.
3937 * Cancel all will return them to us.
3938 *
3939 * Return value:
3940 * nothing
3941 **/
3942static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
3943{
3944 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3945 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3946 struct ipr_cmd_pkt *cmd_pkt;
3947
3948 res->in_erp = 1;
3949
3950 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3951
3952 if (!scsi_get_tag_type(scsi_cmd->device)) {
3953 ipr_erp_request_sense(ipr_cmd);
3954 return;
3955 }
3956
3957 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3958 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3959 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3960
3961 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
3962 IPR_CANCEL_ALL_TIMEOUT);
3963}
3964
3965/**
3966 * ipr_dump_ioasa - Dump contents of IOASA
3967 * @ioa_cfg: ioa config struct
3968 * @ipr_cmd: ipr command struct
fe964d0a 3969 * @res: resource entry struct
3970 *
3971 * This function is invoked by the interrupt handler when ops
3972 * fail. It will log the IOASA if appropriate. Only called
3973 * for GPDD ops.
3974 *
3975 * Return value:
3976 * none
3977 **/
3978static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
fe964d0a 3979 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
3980{
3981 int i;
3982 u16 data_len;
3983 u32 ioasc;
3984 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3985 __be32 *ioasa_data = (__be32 *)ioasa;
3986 int error_index;
3987
3988 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
3989
3990 if (0 == ioasc)
3991 return;
3992
3993 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
3994 return;
3995
3996 error_index = ipr_get_error(ioasc);
3997
3998 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
3999 /* Don't log an error if the IOA already logged one */
4000 if (ioasa->ilid != 0)
4001 return;
4002
4003 if (ipr_error_table[error_index].log_ioasa == 0)
4004 return;
4005 }
4006
fe964d0a 4007 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
4008
4009 if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
4010 data_len = sizeof(struct ipr_ioasa);
4011 else
4012 data_len = be16_to_cpu(ioasa->ret_stat_len);
4013
4014 ipr_err("IOASA Dump:\n");
4015
4016 for (i = 0; i < data_len / 4; i += 4) {
4017 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
4018 be32_to_cpu(ioasa_data[i]),
4019 be32_to_cpu(ioasa_data[i+1]),
4020 be32_to_cpu(ioasa_data[i+2]),
4021 be32_to_cpu(ioasa_data[i+3]));
4022 }
4023}
4024
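/*
 * Illustrative sketch only: dumping a buffer of big-endian 32-bit words
 * four per line with a byte offset, the same layout as the IOASA dump
 * above. ntohl() is a user-space stand-in for be32_to_cpu().
 */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* ntohl(), standing in for be32_to_cpu() */

/* Print complete groups of four big-endian words, prefixed by the byte
 * offset of the first word in the group. */
static void dump_be32_buffer(const uint32_t *buf, size_t len_bytes)
{
	size_t i, nwords = len_bytes / 4;

	for (i = 0; i + 4 <= nwords; i += 4)
		printf("%08zX: %08X %08X %08X %08X\n", i * 4,
		       ntohl(buf[i]), ntohl(buf[i + 1]),
		       ntohl(buf[i + 2]), ntohl(buf[i + 3]));
}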
4025/**
4026 * ipr_gen_sense - Generate SCSI sense data from an IOASA
4027 * @ipr_cmd: ipr command struct
4029 *
4030 * Return value:
4031 * none
4032 **/
4033static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
4034{
4035 u32 failing_lba;
4036 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
4037 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
4038 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4039 u32 ioasc = be32_to_cpu(ioasa->ioasc);
4040
4041 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
4042
4043 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
4044 return;
4045
4046 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
4047
4048 if (ipr_is_vset_device(res) &&
4049 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
4050 ioasa->u.vset.failing_lba_hi != 0) {
4051 sense_buf[0] = 0x72;
4052 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
4053 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
4054 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
4055
4056 sense_buf[7] = 12;
4057 sense_buf[8] = 0;
4058 sense_buf[9] = 0x0A;
4059 sense_buf[10] = 0x80;
4060
4061 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
4062
4063 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
4064 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
4065 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
4066 sense_buf[15] = failing_lba & 0x000000ff;
4067
4068 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4069
4070 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
4071 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
4072 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
4073 sense_buf[19] = failing_lba & 0x000000ff;
4074 } else {
4075 sense_buf[0] = 0x70;
4076 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
4077 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
4078 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
4079
4080 /* Illegal request */
4081 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
4082 (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
4083 sense_buf[7] = 10; /* additional length */
4084
4085 /* IOARCB was in error */
4086 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
4087 sense_buf[15] = 0xC0;
4088 else /* Parameter data was invalid */
4089 sense_buf[15] = 0x80;
4090
4091 sense_buf[16] =
4092 ((IPR_FIELD_POINTER_MASK &
4093 be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
4094 sense_buf[17] =
4095 (IPR_FIELD_POINTER_MASK &
4096 be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
4097 } else {
4098 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
4099 if (ipr_is_vset_device(res))
4100 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4101 else
4102 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
4103
4104 sense_buf[0] |= 0x80; /* Or in the Valid bit */
4105 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
4106 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
4107 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
4108 sense_buf[6] = failing_lba & 0x000000ff;
4109 }
4110
4111 sense_buf[7] = 6; /* additional length */
4112 }
4113 }
4114}
4115
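/*
 * Illustrative sketch only: building minimal fixed-format (0x70) sense
 * data with the VALID bit and a failing LBA in the information field,
 * as the non-descriptor branch above does. The fields used are generic
 * SCSI sense framing, not the driver's exact output.
 */
#include <stdint.h>
#include <string.h>

#define SENSE_LEN 18

/* Fill buf (at least SENSE_LEN bytes) with fixed-format sense data;
 * when a failing LBA is known, set the VALID bit and place the LBA in
 * the 4-byte information field (bytes 3..6). */
static void build_fixed_sense(uint8_t *buf, uint8_t key, uint8_t asc,
			      uint8_t ascq, int have_lba, uint32_t lba)
{
	memset(buf, 0, SENSE_LEN);
	buf[0] = 0x70;			/* current error, fixed format */
	buf[2] = key & 0x0f;
	buf[7] = 10;			/* additional sense length */
	buf[12] = asc;
	buf[13] = ascq;

	if (have_lba) {
		buf[0] |= 0x80;		/* VALID: information field is set */
		buf[3] = (lba >> 24) & 0xff;
		buf[4] = (lba >> 16) & 0xff;
		buf[5] = (lba >> 8) & 0xff;
		buf[6] = lba & 0xff;
	}
}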
ee0a90fa 4116/**
4117 * ipr_get_autosense - Copy autosense data to sense buffer
4118 * @ipr_cmd: ipr command struct
4119 *
4120 * This function copies the autosense buffer to the buffer
4121 * in the scsi_cmd, if there is autosense available.
4122 *
4123 * Return value:
4124 * 1 if autosense was available / 0 if not
4125 **/
4126static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
4127{
4128 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4129
4130 if ((be32_to_cpu(ioasa->ioasc_specific) &
4131 (IPR_ADDITIONAL_STATUS_FMT | IPR_AUTOSENSE_VALID)) == 0)
4132 return 0;
4133
4134 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
4135 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
4136 SCSI_SENSE_BUFFERSIZE));
4137 return 1;
4138}
4139
4140/**
4141 * ipr_erp_start - Process an error response for a SCSI op
4142 * @ioa_cfg: ioa config struct
4143 * @ipr_cmd: ipr command struct
4144 *
4145 * This function determines whether or not to initiate ERP
4146 * on the affected device.
4147 *
4148 * Return value:
4149 * nothing
4150 **/
4151static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4152 struct ipr_cmnd *ipr_cmd)
4153{
4154 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4155 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4156 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4157
4158 if (!res) {
4159 ipr_scsi_eh_done(ipr_cmd);
4160 return;
4161 }
4162
4163 if (ipr_is_gscsi(res))
fe964d0a 4164 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
4165 else
4166 ipr_gen_sense(ipr_cmd);
4167
4168 switch (ioasc & IPR_IOASC_IOASC_MASK) {
4169 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
ee0a90fa 4170 if (ipr_is_naca_model(res))
4171 scsi_cmd->result |= (DID_ABORT << 16);
4172 else
4173 scsi_cmd->result |= (DID_IMM_RETRY << 16);
4174 break;
4175 case IPR_IOASC_IR_RESOURCE_HANDLE:
b0df54bb 4176 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
4177 scsi_cmd->result |= (DID_NO_CONNECT << 16);
4178 break;
4179 case IPR_IOASC_HW_SEL_TIMEOUT:
4180 scsi_cmd->result |= (DID_NO_CONNECT << 16);
ee0a90fa 4181 if (!ipr_is_naca_model(res))
4182 res->needs_sync_complete = 1;
4183 break;
4184 case IPR_IOASC_SYNC_REQUIRED:
4185 if (!res->in_erp)
4186 res->needs_sync_complete = 1;
4187 scsi_cmd->result |= (DID_IMM_RETRY << 16);
4188 break;
4189 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
b0df54bb 4190 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
4191 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
4192 break;
4193 case IPR_IOASC_BUS_WAS_RESET:
4194 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
4195 /*
4196 * Report the bus reset and ask for a retry. The device
4197	 * will give CC/UA on the next command.
4198 */
4199 if (!res->resetting_device)
4200 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
4201 scsi_cmd->result |= (DID_ERROR << 16);
ee0a90fa 4202 if (!ipr_is_naca_model(res))
4203 res->needs_sync_complete = 1;
4204 break;
4205 case IPR_IOASC_HW_DEV_BUS_STATUS:
4206 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
4207 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
ee0a90fa 4208 if (!ipr_get_autosense(ipr_cmd)) {
4209 if (!ipr_is_naca_model(res)) {
4210 ipr_erp_cancel_all(ipr_cmd);
4211 return;
4212 }
4213 }
1da177e4 4214 }
ee0a90fa 4215 if (!ipr_is_naca_model(res))
4216 res->needs_sync_complete = 1;
4217 break;
4218 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
4219 break;
4220 default:
4221 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
4222 scsi_cmd->result |= (DID_ERROR << 16);
ee0a90fa 4223 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
4224 res->needs_sync_complete = 1;
4225 break;
4226 }
4227
4228 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4229 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4230 scsi_cmd->scsi_done(scsi_cmd);
4231}
4232
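/*
 * Illustrative sketch only: the per-error policy in the switch above
 * expressed as a lookup table (disposition plus whether a sync-complete
 * is needed afterwards). Every constant here is made up; the driver
 * itself uses the switch statement, not a table.
 */
#include <stdint.h>
#include <stddef.h>

enum disposition { DISP_RETRY, DISP_ERROR, DISP_NO_CONNECT, DISP_DONE };

struct err_map {
	uint32_t ioasc_class;	/* masked error class (made-up values) */
	enum disposition disp;
	int needs_sync_complete;
};

static const struct err_map err_table[] = {
	{ 0x01000000, DISP_RETRY,      0 },	/* e.g. sync required          */
	{ 0x02000000, DISP_NO_CONNECT, 0 },	/* e.g. stale resource handle  */
	{ 0x03000000, DISP_ERROR,      1 },	/* e.g. bus was reset          */
};

/* Look up the disposition for an error class; unknown classes are
 * treated conservatively as errors that need a sync-complete. */
static enum disposition classify(uint32_t ioasc_class, int *sync)
{
	size_t i;

	for (i = 0; i < sizeof(err_table) / sizeof(err_table[0]); i++) {
		if (err_table[i].ioasc_class == ioasc_class) {
			*sync = err_table[i].needs_sync_complete;
			return err_table[i].disp;
		}
	}
	*sync = 1;
	return DISP_ERROR;
}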
4233/**
4234 * ipr_scsi_done - mid-layer done function
4235 * @ipr_cmd: ipr command struct
4236 *
4237 * This function is invoked by the interrupt handler for
4238 * ops generated by the SCSI mid-layer
4239 *
4240 * Return value:
4241 * none
4242 **/
4243static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
4244{
4245 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4246 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4247 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4248
4249 scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
4250
4251 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
4252 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4253 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4254 scsi_cmd->scsi_done(scsi_cmd);
4255 } else
4256 ipr_erp_start(ioa_cfg, ipr_cmd);
4257}
4258
4259/**
4260 * ipr_queuecommand - Queue a mid-layer request
4261 * @scsi_cmd: scsi command struct
4262 * @done: done function
4263 *
4264 * This function queues a request generated by the mid-layer.
4265 *
4266 * Return value:
4267 * 0 on success
4268 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
4269 * SCSI_MLQUEUE_HOST_BUSY if host is busy
4270 **/
4271static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4272 void (*done) (struct scsi_cmnd *))
4273{
4274 struct ipr_ioa_cfg *ioa_cfg;
4275 struct ipr_resource_entry *res;
4276 struct ipr_ioarcb *ioarcb;
4277 struct ipr_cmnd *ipr_cmd;
4278 int rc = 0;
4279
4280 scsi_cmd->scsi_done = done;
4281 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4282 res = scsi_cmd->device->hostdata;
4283 scsi_cmd->result = (DID_OK << 16);
4284
4285 /*
4286 * We are currently blocking all devices due to a host reset
4287 * We have told the host to stop giving us new requests, but
4288 * ERP ops don't count. FIXME
4289 */
4290 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
4291 return SCSI_MLQUEUE_HOST_BUSY;
4292
4293 /*
4294 * FIXME - Create scsi_set_host_offline interface
4295 * and the ioa_is_dead check can be removed
4296 */
4297 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
4298 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
4299 scsi_cmd->result = (DID_NO_CONNECT << 16);
4300 scsi_cmd->scsi_done(scsi_cmd);
4301 return 0;
4302 }
4303
4304 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4305 ioarcb = &ipr_cmd->ioarcb;
4306 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4307
4308 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
4309 ipr_cmd->scsi_cmd = scsi_cmd;
4310 ioarcb->res_handle = res->cfgte.res_handle;
4311 ipr_cmd->done = ipr_scsi_done;
4312 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
4313
4314 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
4315 if (scsi_cmd->underflow == 0)
4316 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4317
4318 if (res->needs_sync_complete) {
4319 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
4320 res->needs_sync_complete = 0;
4321 }
4322
4323 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
4324 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
4325 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
4326 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
4327 }
4328
4329 if (scsi_cmd->cmnd[0] >= 0xC0 &&
4330 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
4331 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4332
4333 if (likely(rc == 0))
4334 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
4335
4336 if (likely(rc == 0)) {
4337 mb();
4338 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
4339 ioa_cfg->regs.ioarrin_reg);
4340 } else {
4341 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4342 return SCSI_MLQUEUE_HOST_BUSY;
4343 }
4344
4345 return 0;
4346}
4347
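/*
 * Illustrative sketch only: the ordering rule behind the mb() before
 * the ioarrin_reg doorbell write above - the command block must be
 * visible in memory before the adapter is told to fetch it. The device
 * struct is hypothetical and __sync_synchronize() stands in for mb().
 */
#include <stdint.h>

/* Hypothetical device handle: doorbell models the MMIO register that
 * tells the adapter where to find a new command block. */
struct fake_dev {
	volatile uint32_t *doorbell;
};

/* Fill in the command block, force the stores to be globally visible,
 * then ring the doorbell with the block's bus address. */
static void post_command(struct fake_dev *dev, uint32_t *cmd_block,
			 uint32_t block_bus_addr)
{
	cmd_block[0] = 0xC0DE;		/* fill in the request             */
	__sync_synchronize();		/* full barrier (stand-in for mb()) */
	*dev->doorbell = block_bus_addr; /* then hand it to the adapter    */
}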
4348/**
4349 * ipr_ioa_info - Get information about the card/driver
4350 * @host: scsi host struct
4351 *
4352 * Return value:
4353 * pointer to buffer with description string
4354 **/
4355static const char * ipr_ioa_info(struct Scsi_Host *host)
4356{
4357 static char buffer[512];
4358 struct ipr_ioa_cfg *ioa_cfg;
4359 unsigned long lock_flags = 0;
4360
4361 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
4362
4363 spin_lock_irqsave(host->host_lock, lock_flags);
4364 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
4365 spin_unlock_irqrestore(host->host_lock, lock_flags);
4366
4367 return buffer;
4368}
4369
4370static struct scsi_host_template driver_template = {
4371 .module = THIS_MODULE,
4372 .name = "IPR",
4373 .info = ipr_ioa_info,
4374 .queuecommand = ipr_queuecommand,
4375 .eh_abort_handler = ipr_eh_abort,
4376 .eh_device_reset_handler = ipr_eh_dev_reset,
4377 .eh_host_reset_handler = ipr_eh_host_reset,
4378 .slave_alloc = ipr_slave_alloc,
4379 .slave_configure = ipr_slave_configure,
4380 .slave_destroy = ipr_slave_destroy,
4381 .change_queue_depth = ipr_change_queue_depth,
4382 .change_queue_type = ipr_change_queue_type,
4383 .bios_param = ipr_biosparam,
4384 .can_queue = IPR_MAX_COMMANDS,
4385 .this_id = -1,
4386 .sg_tablesize = IPR_MAX_SGLIST,
4387 .max_sectors = IPR_IOA_MAX_SECTORS,
4388 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
4389 .use_clustering = ENABLE_CLUSTERING,
4390 .shost_attrs = ipr_ioa_attrs,
4391 .sdev_attrs = ipr_dev_attrs,
4392 .proc_name = IPR_NAME
4393};
4394
4395#ifdef CONFIG_PPC_PSERIES
4396static const u16 ipr_blocked_processors[] = {
4397 PV_NORTHSTAR,
4398 PV_PULSAR,
4399 PV_POWER4,
4400 PV_ICESTAR,
4401 PV_SSTAR,
4402 PV_POWER4p,
4403 PV_630,
4404 PV_630p
4405};
4406
4407/**
4408 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
4409 * @ioa_cfg: ioa cfg struct
4410 *
4411 * Adapters that use Gemstone revision < 3.1 do not work reliably on
4412 * certain pSeries hardware. This function determines if the given
4413 * adapter is in one of these configurations or not.
4414 *
4415 * Return value:
4416 * 1 if adapter is not supported / 0 if adapter is supported
4417 **/
4418static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
4419{
4420 u8 rev_id;
4421 int i;
4422
4423 if (ioa_cfg->type == 0x5702) {
4424 if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
4425 &rev_id) == PCIBIOS_SUCCESSFUL) {
4426 if (rev_id < 4) {
4427 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
4428 if (__is_processor(ipr_blocked_processors[i]))
4429 return 1;
4430 }
4431 }
4432 }
4433 }
4434 return 0;
4435}
4436#else
4437#define ipr_invalid_adapter(ioa_cfg) 0
4438#endif
4439
4440/**
4441 * ipr_ioa_bringdown_done - IOA bring down completion.
4442 * @ipr_cmd: ipr command struct
4443 *
4444 * This function processes the completion of an adapter bring down.
4445 * It wakes any reset sleepers.
4446 *
4447 * Return value:
4448 * IPR_RC_JOB_RETURN
4449 **/
4450static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
4451{
4452 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4453
4454 ENTER;
4455 ioa_cfg->in_reset_reload = 0;
4456 ioa_cfg->reset_retries = 0;
4457 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4458 wake_up_all(&ioa_cfg->reset_wait_q);
4459
4460 spin_unlock_irq(ioa_cfg->host->host_lock);
4461 scsi_unblock_requests(ioa_cfg->host);
4462 spin_lock_irq(ioa_cfg->host->host_lock);
4463 LEAVE;
4464
4465 return IPR_RC_JOB_RETURN;
4466}
4467
4468/**
4469 * ipr_ioa_reset_done - IOA reset completion.
4470 * @ipr_cmd: ipr command struct
4471 *
4472 * This function processes the completion of an adapter reset.
4473 * It schedules any necessary mid-layer add/removes and
4474 * wakes any reset sleepers.
4475 *
4476 * Return value:
4477 * IPR_RC_JOB_RETURN
4478 **/
4479static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
4480{
4481 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4482 struct ipr_resource_entry *res;
4483 struct ipr_hostrcb *hostrcb, *temp;
4484 int i = 0;
4485
4486 ENTER;
4487 ioa_cfg->in_reset_reload = 0;
4488 ioa_cfg->allow_cmds = 1;
4489 ioa_cfg->reset_cmd = NULL;
3d1d0da6 4490 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
4491
4492 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4493 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
4494 ipr_trace;
4495 break;
4496 }
4497 }
4498 schedule_work(&ioa_cfg->work_q);
4499
4500 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
4501 list_del(&hostrcb->queue);
4502 if (i++ < IPR_NUM_LOG_HCAMS)
4503 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
4504 else
4505 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
4506 }
4507
4508 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
4509
4510 ioa_cfg->reset_retries = 0;
4511 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4512 wake_up_all(&ioa_cfg->reset_wait_q);
4513
4514 spin_unlock_irq(ioa_cfg->host->host_lock);
4515 scsi_unblock_requests(ioa_cfg->host);
4516 spin_lock_irq(ioa_cfg->host->host_lock);
4517
4518 if (!ioa_cfg->allow_cmds)
4519 scsi_block_requests(ioa_cfg->host);
4520
4521 LEAVE;
4522 return IPR_RC_JOB_RETURN;
4523}
4524
4525/**
4526 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
4527 * @supported_dev: supported device struct
4528 * @vpids: vendor product id struct
4529 *
4530 * Return value:
4531 * none
4532 **/
4533static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
4534 struct ipr_std_inq_vpids *vpids)
4535{
4536 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
4537 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
4538 supported_dev->num_records = 1;
4539 supported_dev->data_length =
4540 cpu_to_be16(sizeof(struct ipr_supported_device));
4541 supported_dev->reserved = 0;
4542}
4543
4544/**
4545 * ipr_set_supported_devs - Send Set Supported Devices for a device
4546 * @ipr_cmd: ipr command struct
4547 *
4548 * This function sends a Set Supported Devices to the adapter
4549 *
4550 * Return value:
4551 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4552 **/
4553static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
4554{
4555 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4556 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
4557 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4558 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4559 struct ipr_resource_entry *res = ipr_cmd->u.res;
4560
4561 ipr_cmd->job_step = ipr_ioa_reset_done;
4562
4563 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
e4fbf44e 4564 if (!ipr_is_scsi_disk(res))
4565 continue;
4566
4567 ipr_cmd->u.res = res;
4568 ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
4569
4570 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4571 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4572 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4573
4574 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
4575 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
4576 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
4577
4578 ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
4579 sizeof(struct ipr_supported_device));
4580 ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
4581 offsetof(struct ipr_misc_cbs, supp_dev));
4582 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4583 ioarcb->write_data_transfer_length =
4584 cpu_to_be32(sizeof(struct ipr_supported_device));
4585
4586 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
4587 IPR_SET_SUP_DEVICE_TIMEOUT);
4588
4589 ipr_cmd->job_step = ipr_set_supported_devs;
4590 return IPR_RC_JOB_RETURN;
4591 }
4592
4593 return IPR_RC_JOB_CONTINUE;
4594}
4595
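/*
 * Illustrative toy model of the job_step pattern used throughout this
 * reset path: a step either finishes its work inline and returns
 * CONTINUE so the engine runs the next step at once, or issues an
 * asynchronous command and returns RETURN, to be re-entered from that
 * command's completion (ipr_reset_ioa_job plays the engine's role in
 * the driver). All names below are made up.
 */
#include <stdio.h>

enum job_rc { JOB_CONTINUE, JOB_RETURN };

struct job {
	enum job_rc (*step)(struct job *);
};

static enum job_rc step_b(struct job *j)
{
	printf("step B: async command issued, wait for its completion\n");
	return JOB_RETURN;		/* nothing more to run right now */
}

static enum job_rc step_a(struct job *j)
{
	printf("step A: work done inline, run next step now\n");
	j->step = step_b;
	return JOB_CONTINUE;
}

/* The engine: keep calling the current step until one returns RETURN. */
static void run_job(struct job *j)
{
	while (j->step(j) == JOB_CONTINUE)
		;
}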
62275040 4596/**
4597 * ipr_setup_write_cache - Disable write cache if needed
4598 * @ipr_cmd: ipr command struct
4599 *
4600 * This function sets up the adapter's write cache to the desired setting
4601 *
4602 * Return value:
4603 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4604 **/
4605static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
4606{
4607 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4608
4609 ipr_cmd->job_step = ipr_set_supported_devs;
4610 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
4611 struct ipr_resource_entry, queue);
4612
4613 if (ioa_cfg->cache_state != CACHE_DISABLED)
4614 return IPR_RC_JOB_CONTINUE;
4615
4616 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4617 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4618 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
4619 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
4620
4621 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4622
4623 return IPR_RC_JOB_RETURN;
4624}
4625
4626/**
4627 * ipr_get_mode_page - Locate specified mode page
4628 * @mode_pages: mode page buffer
4629 * @page_code: page code to find
4630 * @len: minimum required length for mode page
4631 *
4632 * Return value:
4633 * pointer to mode page / NULL on failure
4634 **/
4635static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
4636 u32 page_code, u32 len)
4637{
4638 struct ipr_mode_page_hdr *mode_hdr;
4639 u32 page_length;
4640 u32 length;
4641
4642 if (!mode_pages || (mode_pages->hdr.length == 0))
4643 return NULL;
4644
4645 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
4646 mode_hdr = (struct ipr_mode_page_hdr *)
4647 (mode_pages->data + mode_pages->hdr.block_desc_len);
4648
4649 while (length) {
4650 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
4651 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
4652 return mode_hdr;
4653 break;
4654 } else {
4655 page_length = (sizeof(struct ipr_mode_page_hdr) +
4656 mode_hdr->page_length);
4657 length -= page_length;
4658 mode_hdr = (struct ipr_mode_page_hdr *)
4659 ((unsigned long)mode_hdr + page_length);
4660 }
4661 }
4662 return NULL;
4663}
4664
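/*
 * Illustrative sketch only: walking variable-length SCSI mode pages by
 * advancing header-by-header until the requested page code is found,
 * as ipr_get_mode_page() does over the IOAFP data. The two-byte header
 * here is generic mode-page framing, not the driver's structures.
 */
#include <stdint.h>
#include <stddef.h>

struct mode_page_hdr { uint8_t page_code; uint8_t page_length; };

/* Return a pointer to the header of the page with the given code, or
 * NULL if it is not present within len bytes of page data. */
static const struct mode_page_hdr *find_mode_page(const uint8_t *data,
						  size_t len, uint8_t code)
{
	size_t off = 0;

	while (off + sizeof(struct mode_page_hdr) <= len) {
		const struct mode_page_hdr *hdr =
			(const struct mode_page_hdr *)(data + off);

		if ((hdr->page_code & 0x3f) == code)
			return hdr;

		off += sizeof(*hdr) + hdr->page_length;
	}
	return NULL;
}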
4665/**
4666 * ipr_check_term_power - Check for term power errors
4667 * @ioa_cfg: ioa config struct
4668 * @mode_pages: IOAFP mode pages buffer
4669 *
4670 * Check the IOAFP's mode page 28 for term power errors
4671 *
4672 * Return value:
4673 * nothing
4674 **/
4675static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
4676 struct ipr_mode_pages *mode_pages)
4677{
4678 int i;
4679 int entry_length;
4680 struct ipr_dev_bus_entry *bus;
4681 struct ipr_mode_page28 *mode_page;
4682
4683 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4684 sizeof(struct ipr_mode_page28));
4685
4686 entry_length = mode_page->entry_length;
4687
4688 bus = mode_page->bus;
4689
4690 for (i = 0; i < mode_page->num_entries; i++) {
4691 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
4692 dev_err(&ioa_cfg->pdev->dev,
4693 "Term power is absent on scsi bus %d\n",
4694 bus->res_addr.bus);
4695 }
4696
4697 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
4698 }
4699}
4700
4701/**
4702 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
4703 * @ioa_cfg: ioa config struct
4704 *
4705 * Looks through the config table checking for SES devices. If
4706 * the SES device is in the SES table indicating a maximum SCSI
4707 * bus speed, the speed is limited for the bus.
4708 *
4709 * Return value:
4710 * none
4711 **/
4712static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
4713{
4714 u32 max_xfer_rate;
4715 int i;
4716
4717 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
4718 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
4719 ioa_cfg->bus_attr[i].bus_width);
4720
4721 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
4722 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
4723 }
4724}
4725
4726/**
4727 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
4728 * @ioa_cfg: ioa config struct
4729 * @mode_pages: mode page 28 buffer
4730 *
4731 * Updates mode page 28 based on driver configuration
4732 *
4733 * Return value:
4734 * none
4735 **/
4736static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
4737 struct ipr_mode_pages *mode_pages)
4738{
4739 int i, entry_length;
4740 struct ipr_dev_bus_entry *bus;
4741 struct ipr_bus_attributes *bus_attr;
4742 struct ipr_mode_page28 *mode_page;
4743
4744 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4745 sizeof(struct ipr_mode_page28));
4746
4747 entry_length = mode_page->entry_length;
4748
4749 /* Loop for each device bus entry */
4750 for (i = 0, bus = mode_page->bus;
4751 i < mode_page->num_entries;
4752 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
4753 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
4754 dev_err(&ioa_cfg->pdev->dev,
4755 "Invalid resource address reported: 0x%08X\n",
4756 IPR_GET_PHYS_LOC(bus->res_addr));
4757 continue;
4758 }
4759
4760 bus_attr = &ioa_cfg->bus_attr[i];
4761 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
4762 bus->bus_width = bus_attr->bus_width;
4763 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
4764 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
4765 if (bus_attr->qas_enabled)
4766 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
4767 else
4768 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
4769 }
4770}
4771
4772/**
4773 * ipr_build_mode_select - Build a mode select command
4774 * @ipr_cmd: ipr command struct
4775 * @res_handle: resource handle to send command to
4776 * @parm: Byte 2 of Mode Select command
4777 * @dma_addr: DMA buffer address
4778 * @xfer_len: data transfer length
4779 *
4780 * Return value:
4781 * none
4782 **/
4783static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
4784 __be32 res_handle, u8 parm, u32 dma_addr,
4785 u8 xfer_len)
4786{
4787 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4788 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4789
4790 ioarcb->res_handle = res_handle;
4791 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4792 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4793 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
4794 ioarcb->cmd_pkt.cdb[1] = parm;
4795 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4796
4797 ioadl->flags_and_data_len =
4798 cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
4799 ioadl->address = cpu_to_be32(dma_addr);
4800 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4801 ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
4802}
4803
4804/**
4805 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
4806 * @ipr_cmd: ipr command struct
4807 *
4808 * This function sets up the SCSI bus attributes and sends
4809 * a Mode Select for Page 28 to activate them.
4810 *
4811 * Return value:
4812 * IPR_RC_JOB_RETURN
4813 **/
4814static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
4815{
4816 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4817 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
4818 int length;
4819
4820 ENTER;
4821 ipr_scsi_bus_speed_limit(ioa_cfg);
4822 ipr_check_term_power(ioa_cfg, mode_pages);
4823 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
4824 length = mode_pages->hdr.length + 1;
4825 mode_pages->hdr.length = 0;
4826
4827 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
4828 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
4829 length);
4830
62275040 4831 ipr_cmd->job_step = ipr_setup_write_cache;
4832 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4833
4834 LEAVE;
4835 return IPR_RC_JOB_RETURN;
4836}
4837
4838/**
4839 * ipr_build_mode_sense - Builds a mode sense command
4840 * @ipr_cmd: ipr command struct
4841 * @res_handle: resource handle to send command to
4842 * @parm: Byte 2 of mode sense command
4843 * @dma_addr: DMA address of mode sense buffer
4844 * @xfer_len: Size of DMA buffer
4845 *
4846 * Return value:
4847 * none
4848 **/
4849static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
4850 __be32 res_handle,
4851 u8 parm, u32 dma_addr, u8 xfer_len)
4852{
4853 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4854 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4855
4856 ioarcb->res_handle = res_handle;
4857 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
4858 ioarcb->cmd_pkt.cdb[2] = parm;
4859 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4860 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4861
4862 ioadl->flags_and_data_len =
4863 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4864 ioadl->address = cpu_to_be32(dma_addr);
4865 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4866 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4867}
4868
dfed823e 4869/**
4870 * ipr_reset_cmd_failed - Handle failure of IOA reset command
4871 * @ipr_cmd: ipr command struct
4872 *
4873 * This function handles the failure of an IOA bringup command.
4874 *
4875 * Return value:
4876 * IPR_RC_JOB_RETURN
4877 **/
4878static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
4879{
4880 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4881 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4882
4883 dev_err(&ioa_cfg->pdev->dev,
4884 "0x%02X failed with IOASC: 0x%08X\n",
4885 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
4886
4887 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4888 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4889 return IPR_RC_JOB_RETURN;
4890}
4891
4892/**
4893 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
4894 * @ipr_cmd: ipr command struct
4895 *
4896 * This function handles the failure of a Mode Sense to the IOAFP.
4897 * Some adapters do not handle all mode pages.
4898 *
4899 * Return value:
4900 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4901 **/
4902static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
4903{
4904 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4905
4906 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
4907 ipr_cmd->job_step = ipr_setup_write_cache;
4908 return IPR_RC_JOB_CONTINUE;
4909 }
4910
4911 return ipr_reset_cmd_failed(ipr_cmd);
4912}
4913
4914/**
4915 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
4916 * @ipr_cmd: ipr command struct
4917 *
4918 * This function sends a Page 28 mode sense to the IOA to
4919 * retrieve SCSI bus attributes.
4920 *
4921 * Return value:
4922 * IPR_RC_JOB_RETURN
4923 **/
4924static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
4925{
4926 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4927
4928 ENTER;
4929 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
4930 0x28, ioa_cfg->vpd_cbs_dma +
4931 offsetof(struct ipr_misc_cbs, mode_pages),
4932 sizeof(struct ipr_mode_pages));
4933
4934 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
dfed823e 4935 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
4936
4937 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4938
4939 LEAVE;
4940 return IPR_RC_JOB_RETURN;
4941}
4942
4943/**
4944 * ipr_init_res_table - Initialize the resource table
4945 * @ipr_cmd: ipr command struct
4946 *
4947 * This function looks through the existing resource table, comparing
4948 * it with the config table. This function will take care of old/new
4949 * devices and schedule adding/removing them from the mid-layer
4950 * as appropriate.
4951 *
4952 * Return value:
4953 * IPR_RC_JOB_CONTINUE
4954 **/
4955static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
4956{
4957 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4958 struct ipr_resource_entry *res, *temp;
4959 struct ipr_config_table_entry *cfgte;
4960 int found, i;
4961 LIST_HEAD(old_res);
4962
4963 ENTER;
4964 if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
4965 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
4966
4967 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
4968 list_move_tail(&res->queue, &old_res);
4969
4970 for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
4971 cfgte = &ioa_cfg->cfg_table->dev[i];
4972 found = 0;
4973
4974 list_for_each_entry_safe(res, temp, &old_res, queue) {
4975 if (!memcmp(&res->cfgte.res_addr,
4976 &cfgte->res_addr, sizeof(cfgte->res_addr))) {
4977 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4978 found = 1;
4979 break;
4980 }
4981 }
4982
4983 if (!found) {
4984 if (list_empty(&ioa_cfg->free_res_q)) {
4985 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
4986 break;
4987 }
4988
4989 found = 1;
4990 res = list_entry(ioa_cfg->free_res_q.next,
4991 struct ipr_resource_entry, queue);
4992 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4993 ipr_init_res_entry(res);
4994 res->add_to_ml = 1;
4995 }
4996
4997 if (found)
4998 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
4999 }
5000
5001 list_for_each_entry_safe(res, temp, &old_res, queue) {
5002 if (res->sdev) {
5003 res->del_from_ml = 1;
1121b794 5004 res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
5005 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5006 } else {
5007 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
5008 }
5009 }
5010
5011 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
5012
5013 LEAVE;
5014 return IPR_RC_JOB_CONTINUE;
5015}
5016
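/*
 * Illustrative toy model of the reconciliation above: entries still
 * reported by the adapter stay (new ones are marked for addition to
 * the mid-layer), and previously known entries that no longer appear
 * are marked for removal. Fixed-size arrays stand in for the driver's
 * linked lists; all names and limits are made up.
 */
#define MAX_RES 8

struct res { int addr; int add_to_ml; int del_from_ml; };

/* known[] must have room for MAX_RES entries; returns the new count. */
static int reconcile(struct res *known, int nknown,
		     const int *reported, int nreported)
{
	int i, j, found;

	for (i = 0; i < nknown; i++)
		known[i].del_from_ml = 1;	/* assume gone until seen again */

	for (j = 0; j < nreported; j++) {
		found = 0;
		for (i = 0; i < nknown; i++) {
			if (known[i].addr == reported[j]) {
				known[i].del_from_ml = 0;
				found = 1;
				break;
			}
		}
		if (!found && nknown < MAX_RES) {
			known[nknown].addr = reported[j];
			known[nknown].add_to_ml = 1;
			known[nknown].del_from_ml = 0;
			nknown++;
		}
	}
	return nknown;
}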
5017/**
5018 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
5019 * @ipr_cmd: ipr command struct
5020 *
5021 * This function sends a Query IOA Configuration command
5022 * to the adapter to retrieve the IOA configuration table.
5023 *
5024 * Return value:
5025 * IPR_RC_JOB_RETURN
5026 **/
5027static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
5028{
5029 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5030 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5031 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5032 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
5033
5034 ENTER;
5035 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
5036 ucode_vpd->major_release, ucode_vpd->card_type,
5037 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
5038 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5039 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5040
5041 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
5042 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
5043 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
5044
5045 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5046 ioarcb->read_data_transfer_length =
5047 cpu_to_be32(sizeof(struct ipr_config_table));
5048
5049 ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
5050 ioadl->flags_and_data_len =
5051 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
5052
5053 ipr_cmd->job_step = ipr_init_res_table;
5054
5055 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5056
5057 LEAVE;
5058 return IPR_RC_JOB_RETURN;
5059}
5060
5061/**
5062 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
5063 * @ipr_cmd: ipr command struct
5064 *
5065 * This utility function sends an inquiry to the adapter.
5066 *
5067 * Return value:
5068 * none
5069 **/
5070static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
5071 u32 dma_addr, u8 xfer_len)
5072{
5073 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5074 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5075
5076 ENTER;
5077 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5078 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5079
5080 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
5081 ioarcb->cmd_pkt.cdb[1] = flags;
5082 ioarcb->cmd_pkt.cdb[2] = page;
5083 ioarcb->cmd_pkt.cdb[4] = xfer_len;
5084
5085 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5086 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5087
5088 ioadl->address = cpu_to_be32(dma_addr);
5089 ioadl->flags_and_data_len =
5090 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5091
5092 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5093 LEAVE;
5094}
5095
62275040 5096/**
5097 * ipr_inquiry_page_supported - Is the given inquiry page supported
5098 * @page0: inquiry page 0 buffer
5099 * @page: page code.
5100 *
5101 * This function determines if the specified inquiry page is supported.
5102 *
5103 * Return value:
5104 * 1 if page is supported / 0 if not
5105 **/
5106static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
5107{
5108 int i;
5109
5110 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
5111 if (page0->page[i] == page)
5112 return 1;
5113
5114 return 0;
5115}
5116
5117/**
5118 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
5119 * @ipr_cmd: ipr command struct
5120 *
5121 * This function sends a Page 3 inquiry to the adapter
5122 * to retrieve software VPD information.
5123 *
5124 * Return value:
5125 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5126 **/
5127static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
62275040 5128{
5129 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5130 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
5131
5132 ENTER;
5133
5134 if (!ipr_inquiry_page_supported(page0, 1))
5135 ioa_cfg->cache_state = CACHE_NONE;
5136
5137 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
5138
5139 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
5140 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
5141 sizeof(struct ipr_inquiry_page3));
5142
5143 LEAVE;
5144 return IPR_RC_JOB_RETURN;
5145}
5146
5147/**
5148 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
5149 * @ipr_cmd: ipr command struct
5150 *
5151 * This function sends a Page 0 inquiry to the adapter
5152 * to retrieve supported inquiry pages.
5153 *
5154 * Return value:
5155 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5156 **/
5157static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
5158{
5159 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5160 char type[5];
5161
5162 ENTER;
5163
5164 /* Grab the type out of the VPD and store it away */
5165 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
5166 type[4] = '\0';
5167 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
5168
62275040 5169 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
1da177e4 5170
62275040 5171 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
5172 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
5173 sizeof(struct ipr_inquiry_page0));
5174
5175 LEAVE;
5176 return IPR_RC_JOB_RETURN;
5177}
5178
5179/**
5180 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
5181 * @ipr_cmd: ipr command struct
5182 *
5183 * This function sends a standard inquiry to the adapter.
5184 *
5185 * Return value:
5186 * IPR_RC_JOB_RETURN
5187 **/
5188static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
5189{
5190 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5191
5192 ENTER;
62275040 5193 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
5194
5195 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
5196 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
5197 sizeof(struct ipr_ioa_vpd));
5198
5199 LEAVE;
5200 return IPR_RC_JOB_RETURN;
5201}
5202
5203/**
5204 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
5205 * @ipr_cmd: ipr command struct
5206 *
5207 * This function sends an Identify Host Request Response Queue
5208 * command to establish the HRRQ with the adapter.
5209 *
5210 * Return value:
5211 * IPR_RC_JOB_RETURN
5212 **/
5213static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
5214{
5215 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5216 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5217
5218 ENTER;
5219 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
5220
5221 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
5222 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5223
5224 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5225 ioarcb->cmd_pkt.cdb[2] =
5226 ((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
5227 ioarcb->cmd_pkt.cdb[3] =
5228 ((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
5229 ioarcb->cmd_pkt.cdb[4] =
5230 ((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
5231 ioarcb->cmd_pkt.cdb[5] =
5232 ((u32) ioa_cfg->host_rrq_dma) & 0xff;
5233 ioarcb->cmd_pkt.cdb[7] =
5234 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
5235 ioarcb->cmd_pkt.cdb[8] =
5236 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
5237
5238 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
5239
5240 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5241
5242 LEAVE;
5243 return IPR_RC_JOB_RETURN;
5244}
5245
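/*
 * Illustrative sketch only: the shift-and-mask packing of the host RRQ
 * DMA address and length into individual CDB bytes, matching the byte
 * offsets used above (2..5 for the address, 7..8 for the length).
 */
#include <stdint.h>

/* Pack a 32-bit bus address and 16-bit length into the CDB, most
 * significant byte first. */
static void pack_hrrq_cdb(uint8_t *cdb, uint32_t dma_addr, uint16_t len)
{
	cdb[2] = (dma_addr >> 24) & 0xff;
	cdb[3] = (dma_addr >> 16) & 0xff;
	cdb[4] = (dma_addr >> 8) & 0xff;
	cdb[5] = dma_addr & 0xff;
	cdb[7] = (len >> 8) & 0xff;
	cdb[8] = len & 0xff;
}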
5246/**
5247 * ipr_reset_timer_done - Adapter reset timer function
5248 * @ipr_cmd: ipr command struct
5249 *
5250 * Description: This function is used in adapter reset processing
5251 * for timing events. If the reset_cmd pointer in the IOA
5252 * config struct is not this adapter's, we are doing nested
5253 * resets and fail_all_ops will take care of freeing the
5254 * command block.
5255 *
5256 * Return value:
5257 * none
5258 **/
5259static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
5260{
5261 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5262 unsigned long lock_flags = 0;
5263
5264 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5265
5266 if (ioa_cfg->reset_cmd == ipr_cmd) {
5267 list_del(&ipr_cmd->queue);
5268 ipr_cmd->done(ipr_cmd);
5269 }
5270
5271 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5272}
5273
5274/**
5275 * ipr_reset_start_timer - Start a timer for adapter reset job
5276 * @ipr_cmd: ipr command struct
5277 * @timeout: timeout value
5278 *
5279 * Description: This function is used in adapter reset processing
5280 * for timing events. If the reset_cmd pointer in the IOA
5281 * config struct is not this adapter's, we are doing nested
5282 * resets and fail_all_ops will take care of freeing the
5283 * command block.
5284 *
5285 * Return value:
5286 * none
5287 **/
5288static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
5289 unsigned long timeout)
5290{
5291 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
5292 ipr_cmd->done = ipr_reset_ioa_job;
5293
5294 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5295 ipr_cmd->timer.expires = jiffies + timeout;
5296 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
5297 add_timer(&ipr_cmd->timer);
5298}
5299
5300/**
5301 * ipr_init_ioa_mem - Initialize ioa_cfg control block
5302 * @ioa_cfg: ioa cfg struct
5303 *
5304 * Return value:
5305 * nothing
5306 **/
5307static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
5308{
5309 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
5310
5311 /* Initialize Host RRQ pointers */
5312 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
5313 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
5314 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
5315 ioa_cfg->toggle_bit = 1;
5316
5317 /* Zero out config table */
5318 memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
5319}
5320
5321/**
5322 * ipr_reset_enable_ioa - Enable the IOA following a reset.
5323 * @ipr_cmd: ipr command struct
5324 *
5325 * This function reinitializes some control blocks and
5326 * enables destructive diagnostics on the adapter.
5327 *
5328 * Return value:
5329 * IPR_RC_JOB_RETURN
5330 **/
5331static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
5332{
5333 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5334 volatile u32 int_reg;
5335
5336 ENTER;
5337 ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
5338 ipr_init_ioa_mem(ioa_cfg);
5339
5340 ioa_cfg->allow_interrupts = 1;
5341 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5342
5343 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5344 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
5345 ioa_cfg->regs.clr_interrupt_mask_reg);
5346 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5347 return IPR_RC_JOB_CONTINUE;
5348 }
5349
5350 /* Enable destructive diagnostics on IOA */
3d1d0da6 5351 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);
5352
5353 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
5354 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5355
5356 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
5357
5358 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5359 ipr_cmd->timer.expires = jiffies + (ipr_transop_timeout * HZ);
5360 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
5361 ipr_cmd->done = ipr_reset_ioa_job;
5362 add_timer(&ipr_cmd->timer);
5363 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5364
5365 LEAVE;
5366 return IPR_RC_JOB_RETURN;
5367}
5368
5369/**
5370 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
5371 * @ipr_cmd: ipr command struct
5372 *
5373 * This function is invoked when an adapter dump has run out
5374 * of processing time.
5375 *
5376 * Return value:
5377 * IPR_RC_JOB_CONTINUE
5378 **/
5379static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
5380{
5381 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5382
5383 if (ioa_cfg->sdt_state == GET_DUMP)
5384 ioa_cfg->sdt_state = ABORT_DUMP;
5385
5386 ipr_cmd->job_step = ipr_reset_alert;
5387
5388 return IPR_RC_JOB_CONTINUE;
5389}
5390
5391/**
5392 * ipr_unit_check_no_data - Log a unit check/no data error log
5393 * @ioa_cfg: ioa config struct
5394 *
5395 * Logs an error indicating the adapter unit checked, but for some
5396 * reason, we were unable to fetch the unit check buffer.
5397 *
5398 * Return value:
5399 * nothing
5400 **/
5401static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
5402{
5403 ioa_cfg->errors_logged++;
5404 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
5405}
5406
5407/**
5408 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
5409 * @ioa_cfg: ioa config struct
5410 *
5411 * Fetches the unit check buffer from the adapter by clocking the data
5412 * through the mailbox register.
5413 *
5414 * Return value:
5415 * nothing
5416 **/
5417static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
5418{
5419 unsigned long mailbox;
5420 struct ipr_hostrcb *hostrcb;
5421 struct ipr_uc_sdt sdt;
5422 int rc, length;
5423
5424 mailbox = readl(ioa_cfg->ioa_mailbox);
5425
5426 if (!ipr_sdt_is_fmt2(mailbox)) {
5427 ipr_unit_check_no_data(ioa_cfg);
5428 return;
5429 }
5430
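	/*
	 * Clock the smart dump table header out through the mailbox so we
	 * can locate the first SDT entry, which addresses the unit check buffer.
	 */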
5431 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
5432 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
5433 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
5434
5435 if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
5436 !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
5437 ipr_unit_check_no_data(ioa_cfg);
5438 return;
5439 }
5440
5441 /* Find length of the first sdt entry (UC buffer) */
5442 length = (be32_to_cpu(sdt.entry[0].end_offset) -
5443 be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
5444
5445 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
5446 struct ipr_hostrcb, queue);
5447 list_del(&hostrcb->queue);
5448 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
5449
5450 rc = ipr_get_ldump_data_section(ioa_cfg,
5451 be32_to_cpu(sdt.entry[0].bar_str_offset),
5452 (__be32 *)&hostrcb->hcam,
5453 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
5454
5455 if (!rc)
5456 ipr_handle_log_data(ioa_cfg, hostrcb);
5457 else
5458 ipr_unit_check_no_data(ioa_cfg);
5459
5460 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
5461}
5462
5463/**
5464 * ipr_reset_restore_cfg_space - Restore PCI config space.
5465 * @ipr_cmd: ipr command struct
5466 *
5467 * Description: This function restores the saved PCI config space of
5468 * the adapter, fails all outstanding ops back to the callers, and
5469 * fetches the dump/unit check if applicable to this reset.
5470 *
5471 * Return value:
5472 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5473 **/
5474static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
5475{
5476 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5477 int rc;
5478
5479 ENTER;
b30197d2 5480 pci_unblock_user_cfg_access(ioa_cfg->pdev);
5481 rc = pci_restore_state(ioa_cfg->pdev);
5482
5483 if (rc != PCIBIOS_SUCCESSFUL) {
5484 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5485 return IPR_RC_JOB_CONTINUE;
5486 }
5487
5488 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
5489 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5490 return IPR_RC_JOB_CONTINUE;
5491 }
5492
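	/*
	 * Any ops that were outstanding across the reset are lost; fail
	 * them back to their callers before bringing the IOA back up.
	 */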
5493 ipr_fail_all_ops(ioa_cfg);
5494
5495 if (ioa_cfg->ioa_unit_checked) {
5496 ioa_cfg->ioa_unit_checked = 0;
5497 ipr_get_unit_check_buffer(ioa_cfg);
5498 ipr_cmd->job_step = ipr_reset_alert;
5499 ipr_reset_start_timer(ipr_cmd, 0);
5500 return IPR_RC_JOB_RETURN;
5501 }
5502
5503 if (ioa_cfg->in_ioa_bringdown) {
5504 ipr_cmd->job_step = ipr_ioa_bringdown_done;
5505 } else {
5506 ipr_cmd->job_step = ipr_reset_enable_ioa;
5507
5508 if (GET_DUMP == ioa_cfg->sdt_state) {
5509 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
5510 ipr_cmd->job_step = ipr_reset_wait_for_dump;
5511 schedule_work(&ioa_cfg->work_q);
5512 return IPR_RC_JOB_RETURN;
5513 }
5514 }
5515
 5517 	LEAVE;
5517 return IPR_RC_JOB_CONTINUE;
5518}
5519
5520/**
5521 * ipr_reset_start_bist - Run BIST on the adapter.
5522 * @ipr_cmd: ipr command struct
5523 *
5524 * Description: This function runs BIST on the adapter, then delays 2 seconds.
5525 *
5526 * Return value:
5527 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5528 **/
5529static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
5530{
5531 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5532 int rc;
5533
5534 ENTER;
b30197d2 5535 pci_block_user_cfg_access(ioa_cfg->pdev);
5536 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
5537
5538 if (rc != PCIBIOS_SUCCESSFUL) {
5539 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5540 rc = IPR_RC_JOB_CONTINUE;
5541 } else {
5542 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
5543 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
5544 rc = IPR_RC_JOB_RETURN;
5545 }
5546
5547 LEAVE;
5548 return rc;
5549}
5550
5551/**
5552 * ipr_reset_allowed - Query whether or not IOA can be reset
5553 * @ioa_cfg: ioa config struct
5554 *
5555 * Return value:
5556 * 0 if reset not allowed / non-zero if reset is allowed
5557 **/
5558static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
5559{
5560 volatile u32 temp_reg;
5561
5562 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5563 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
5564}
5565
5566/**
5567 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
5568 * @ipr_cmd: ipr command struct
5569 *
5570 * Description: This function waits for adapter permission to run BIST,
5571 * then runs BIST. If the adapter does not give permission after a
 5572 * reasonable time, we will reset the adapter anyway. The impact of
 5573 * resetting the adapter without warning it is the risk of losing
 5574 * the persistent error log. If the adapter is reset while it is
 5575 * writing to its flash, the flash segment will have bad ECC and
 5576 * be zeroed.
5577 *
5578 * Return value:
5579 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5580 **/
5581static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
5582{
5583 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5584 int rc = IPR_RC_JOB_RETURN;
5585
5586 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
5587 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
5588 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5589 } else {
5590 ipr_cmd->job_step = ipr_reset_start_bist;
5591 rc = IPR_RC_JOB_CONTINUE;
5592 }
5593
5594 return rc;
5595}
5596
5597/**
5598 * ipr_reset_alert_part2 - Alert the adapter of a pending reset
5599 * @ipr_cmd: ipr command struct
5600 *
5601 * Description: This function alerts the adapter that it will be reset.
5602 * If memory space is not currently enabled, proceed directly
5603 * to running BIST on the adapter. The timer must always be started
5604 * so we guarantee we do not run BIST from ipr_isr.
5605 *
5606 * Return value:
5607 * IPR_RC_JOB_RETURN
5608 **/
5609static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
5610{
5611 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5612 u16 cmd_reg;
5613 int rc;
5614
5615 ENTER;
5616 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
5617
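	/*
	 * Only alert the IOA if PCI memory space is enabled; otherwise the
	 * MMIO write would never reach the adapter, so go straight to BIST.
	 */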
5618 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
5619 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5620 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
5621 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
5622 } else {
5623 ipr_cmd->job_step = ipr_reset_start_bist;
5624 }
5625
5626 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
5627 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5628
5629 LEAVE;
5630 return IPR_RC_JOB_RETURN;
5631}
5632
5633/**
5634 * ipr_reset_ucode_download_done - Microcode download completion
5635 * @ipr_cmd: ipr command struct
5636 *
5637 * Description: This function unmaps the microcode download buffer.
5638 *
5639 * Return value:
5640 * IPR_RC_JOB_CONTINUE
5641 **/
5642static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
5643{
5644 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5645 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5646
5647 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
5648 sglist->num_sg, DMA_TO_DEVICE);
5649
5650 ipr_cmd->job_step = ipr_reset_alert;
5651 return IPR_RC_JOB_CONTINUE;
5652}
5653
5654/**
5655 * ipr_reset_ucode_download - Download microcode to the adapter
5656 * @ipr_cmd: ipr command struct
5657 *
 5658 * Description: This function checks to see if there is microcode
5659 * to download to the adapter. If there is, a download is performed.
5660 *
5661 * Return value:
5662 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5663 **/
5664static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
5665{
5666 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5667 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5668
5669 ENTER;
5670 ipr_cmd->job_step = ipr_reset_alert;
5671
5672 if (!sglist)
5673 return IPR_RC_JOB_CONTINUE;
5674
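	/*
	 * Build a WRITE BUFFER (download microcode and save) CDB addressed
	 * to the IOA itself; CDB bytes 6-8 carry the 24-bit image length.
	 */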
5675 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5676 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5677 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
5678 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
5679 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
5680 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
5681 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
5682
12baa420 5683 ipr_build_ucode_ioadl(ipr_cmd, sglist);
5684 ipr_cmd->job_step = ipr_reset_ucode_download_done;
5685
5686 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5687 IPR_WRITE_BUFFER_TIMEOUT);
5688
5689 LEAVE;
5690 return IPR_RC_JOB_RETURN;
5691}
5692
5693/**
5694 * ipr_reset_shutdown_ioa - Shutdown the adapter
5695 * @ipr_cmd: ipr command struct
5696 *
5697 * Description: This function issues an adapter shutdown of the
5698 * specified type to the specified adapter as part of the
5699 * adapter reset job.
5700 *
5701 * Return value:
5702 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5703 **/
5704static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
5705{
5706 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5707 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
5708 unsigned long timeout;
5709 int rc = IPR_RC_JOB_CONTINUE;
5710
5711 ENTER;
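	/*
	 * Only issue a shutdown to the adapter if one was requested and the
	 * IOA is still functional; otherwise skip straight to the reset alert.
	 */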
5712 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
5713 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5714 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5715 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5716 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
5717
5718 if (shutdown_type == IPR_SHUTDOWN_ABBREV)
5719 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
5720 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
5721 timeout = IPR_INTERNAL_TIMEOUT;
5722 else
5723 timeout = IPR_SHUTDOWN_TIMEOUT;
5724
5725 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
5726
5727 rc = IPR_RC_JOB_RETURN;
5728 ipr_cmd->job_step = ipr_reset_ucode_download;
5729 } else
5730 ipr_cmd->job_step = ipr_reset_alert;
5731
5732 LEAVE;
5733 return rc;
5734}
5735
5736/**
5737 * ipr_reset_ioa_job - Adapter reset job
5738 * @ipr_cmd: ipr command struct
5739 *
5740 * Description: This function is the job router for the adapter reset job.
5741 *
5742 * Return value:
5743 * none
5744 **/
5745static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
5746{
5747 u32 rc, ioasc;
5748 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5749
5750 do {
5751 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5752
5753 if (ioa_cfg->reset_cmd != ipr_cmd) {
5754 /*
5755 * We are doing nested adapter resets and this is
5756 * not the current reset job.
5757 */
5758 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5759 return;
5760 }
5761
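		/*
		 * A non-zero sense key in the IOASC means the previous job
		 * step failed; let the failure handler decide whether to
		 * keep going or bail out.
		 */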
5762 if (IPR_IOASC_SENSE_KEY(ioasc)) {
dfed823e 5763 rc = ipr_cmd->job_step_failed(ipr_cmd);
5764 if (rc == IPR_RC_JOB_RETURN)
5765 return;
5766 }
5767
5768 ipr_reinit_ipr_cmnd(ipr_cmd);
dfed823e 5769 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
5770 rc = ipr_cmd->job_step(ipr_cmd);
5771 } while(rc == IPR_RC_JOB_CONTINUE);
5772}
5773
5774/**
5775 * _ipr_initiate_ioa_reset - Initiate an adapter reset
5776 * @ioa_cfg: ioa config struct
5777 * @job_step: first job step of reset job
5778 * @shutdown_type: shutdown type
5779 *
5780 * Description: This function will initiate the reset of the given adapter
5781 * starting at the selected job step.
5782 * If the caller needs to wait on the completion of the reset,
5783 * the caller must sleep on the reset_wait_q.
5784 *
5785 * Return value:
5786 * none
5787 **/
5788static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5789 int (*job_step) (struct ipr_cmnd *),
5790 enum ipr_shutdown_type shutdown_type)
5791{
5792 struct ipr_cmnd *ipr_cmd;
5793
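	/*
	 * Mark the adapter as resetting and block new requests from the
	 * SCSI midlayer while the reset job owns the hardware.
	 */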
5794 ioa_cfg->in_reset_reload = 1;
5795 ioa_cfg->allow_cmds = 0;
5796 scsi_block_requests(ioa_cfg->host);
5797
5798 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5799 ioa_cfg->reset_cmd = ipr_cmd;
5800 ipr_cmd->job_step = job_step;
5801 ipr_cmd->u.shutdown_type = shutdown_type;
5802
5803 ipr_reset_ioa_job(ipr_cmd);
5804}
5805
5806/**
5807 * ipr_initiate_ioa_reset - Initiate an adapter reset
5808 * @ioa_cfg: ioa config struct
5809 * @shutdown_type: shutdown type
5810 *
5811 * Description: This function will initiate the reset of the given adapter.
5812 * If the caller needs to wait on the completion of the reset,
5813 * the caller must sleep on the reset_wait_q.
5814 *
5815 * Return value:
5816 * none
5817 **/
5818static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5819 enum ipr_shutdown_type shutdown_type)
5820{
5821 if (ioa_cfg->ioa_is_dead)
5822 return;
5823
5824 if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
5825 ioa_cfg->sdt_state = ABORT_DUMP;
5826
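	/*
	 * If the reset has already been retried too many times, give up and
	 * take the IOA offline rather than looping forever.
	 */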
5827 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
5828 dev_err(&ioa_cfg->pdev->dev,
5829 "IOA taken offline - error recovery failed\n");
5830
5831 ioa_cfg->reset_retries = 0;
5832 ioa_cfg->ioa_is_dead = 1;
5833
5834 if (ioa_cfg->in_ioa_bringdown) {
5835 ioa_cfg->reset_cmd = NULL;
5836 ioa_cfg->in_reset_reload = 0;
5837 ipr_fail_all_ops(ioa_cfg);
5838 wake_up_all(&ioa_cfg->reset_wait_q);
5839
5840 spin_unlock_irq(ioa_cfg->host->host_lock);
5841 scsi_unblock_requests(ioa_cfg->host);
5842 spin_lock_irq(ioa_cfg->host->host_lock);
5843 return;
5844 } else {
5845 ioa_cfg->in_ioa_bringdown = 1;
5846 shutdown_type = IPR_SHUTDOWN_NONE;
5847 }
5848 }
5849
5850 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
5851 shutdown_type);
5852}
5853
5854/**
5855 * ipr_reset_freeze - Hold off all I/O activity
5856 * @ipr_cmd: ipr command struct
5857 *
5858 * Description: If the PCI slot is frozen, hold off all I/O
5859 * activity; then, as soon as the slot is available again,
5860 * initiate an adapter reset.
5861 */
5862static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
5863{
5864 /* Disallow new interrupts, avoid loop */
5865 ipr_cmd->ioa_cfg->allow_interrupts = 0;
5866 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
5867 ipr_cmd->done = ipr_reset_ioa_job;
5868 return IPR_RC_JOB_RETURN;
5869}
5870
5871/**
5872 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
5873 * @pdev: PCI device struct
5874 *
5875 * Description: This routine is called to tell us that the PCI bus
5876 * is down. Can't do anything here, except put the device driver
5877 * into a holding pattern, waiting for the PCI bus to come back.
5878 */
5879static void ipr_pci_frozen(struct pci_dev *pdev)
5880{
5881 unsigned long flags = 0;
5882 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
5883
5884 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5885 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
5886 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5887}
5888
5889/**
5890 * ipr_pci_slot_reset - Called when PCI slot has been reset.
5891 * @pdev: PCI device struct
5892 *
5893 * Description: This routine is called by the pci error recovery
5894 * code after the PCI slot has been reset, just before we
5895 * should resume normal operations.
5896 */
5897static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
5898{
5899 unsigned long flags = 0;
5900 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
5901
5902 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5903 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
5904 IPR_SHUTDOWN_NONE);
5905 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5906 return PCI_ERS_RESULT_RECOVERED;
5907}
5908
5909/**
5910 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
5911 * @pdev: PCI device struct
5912 *
5913 * Description: This routine is called when the PCI bus has
5914 * permanently failed.
5915 */
5916static void ipr_pci_perm_failure(struct pci_dev *pdev)
5917{
5918 unsigned long flags = 0;
5919 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
5920
5921 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5922 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
5923 ioa_cfg->sdt_state = ABORT_DUMP;
5924 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
5925 ioa_cfg->in_ioa_bringdown = 1;
5926 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5927 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5928}
5929
5930/**
5931 * ipr_pci_error_detected - Called when a PCI error is detected.
5932 * @pdev: PCI device struct
5933 * @state: PCI channel state
5934 *
5935 * Description: Called when a PCI error is detected.
5936 *
5937 * Return value:
5938 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
5939 */
5940static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
5941 pci_channel_state_t state)
5942{
5943 switch (state) {
5944 case pci_channel_io_frozen:
5945 ipr_pci_frozen(pdev);
5946 return PCI_ERS_RESULT_NEED_RESET;
5947 case pci_channel_io_perm_failure:
5948 ipr_pci_perm_failure(pdev);
 5949 		return PCI_ERS_RESULT_DISCONNECT;
5951 default:
5952 break;
5953 }
5954 return PCI_ERS_RESULT_NEED_RESET;
5955}
5956
5957/**
5958 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
5959 * @ioa_cfg: ioa cfg struct
5960 *
 5961 * Description: This is the second phase of adapter initialization.
 5962 * This function takes care of initializing the adapter to the point
 5963 * where it can accept new commands.
 5964 *
 5965 * Return value:
 5966 * 	0 on success / -EIO on failure
5967 **/
5968static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
5969{
5970 int rc = 0;
5971 unsigned long host_lock_flags = 0;
5972
5973 ENTER;
5974 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5975 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
ce155cce 5976 if (ioa_cfg->needs_hard_reset) {
5977 ioa_cfg->needs_hard_reset = 0;
5978 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5979 } else
5980 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
5981 IPR_SHUTDOWN_NONE);
5982
5983 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5984 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5985 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5986
5987 if (ioa_cfg->ioa_is_dead) {
5988 rc = -EIO;
5989 } else if (ipr_invalid_adapter(ioa_cfg)) {
5990 if (!ipr_testmode)
5991 rc = -EIO;
5992
5993 dev_err(&ioa_cfg->pdev->dev,
5994 "Adapter not supported in this hardware configuration.\n");
5995 }
5996
5997 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5998
5999 LEAVE;
6000 return rc;
6001}
6002
6003/**
6004 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
6005 * @ioa_cfg: ioa config struct
6006 *
6007 * Return value:
6008 * none
6009 **/
6010static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
6011{
6012 int i;
6013
6014 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
6015 if (ioa_cfg->ipr_cmnd_list[i])
6016 pci_pool_free(ioa_cfg->ipr_cmd_pool,
6017 ioa_cfg->ipr_cmnd_list[i],
6018 ioa_cfg->ipr_cmnd_list_dma[i]);
6019
6020 ioa_cfg->ipr_cmnd_list[i] = NULL;
6021 }
6022
6023 if (ioa_cfg->ipr_cmd_pool)
6024 pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
6025
6026 ioa_cfg->ipr_cmd_pool = NULL;
6027}
6028
6029/**
6030 * ipr_free_mem - Frees memory allocated for an adapter
6031 * @ioa_cfg: ioa cfg struct
6032 *
6033 * Return value:
6034 * nothing
6035 **/
6036static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
6037{
6038 int i;
6039
6040 kfree(ioa_cfg->res_entries);
6041 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
6042 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
6043 ipr_free_cmd_blks(ioa_cfg);
6044 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
6045 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
6046 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
6047 ioa_cfg->cfg_table,
6048 ioa_cfg->cfg_table_dma);
6049
6050 for (i = 0; i < IPR_NUM_HCAMS; i++) {
6051 pci_free_consistent(ioa_cfg->pdev,
6052 sizeof(struct ipr_hostrcb),
6053 ioa_cfg->hostrcb[i],
6054 ioa_cfg->hostrcb_dma[i]);
6055 }
6056
6057 ipr_free_dump(ioa_cfg);
6058 kfree(ioa_cfg->trace);
6059}
6060
6061/**
6062 * ipr_free_all_resources - Free all allocated resources for an adapter.
6063 * @ipr_cmd: ipr command struct
6064 *
6065 * This function frees all allocated resources for the
6066 * specified adapter.
6067 *
6068 * Return value:
6069 * none
6070 **/
6071static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
6072{
6073 struct pci_dev *pdev = ioa_cfg->pdev;
6074
6075 ENTER;
6076 free_irq(pdev->irq, ioa_cfg);
6077 iounmap(ioa_cfg->hdw_dma_regs);
6078 pci_release_regions(pdev);
6079 ipr_free_mem(ioa_cfg);
6080 scsi_host_put(ioa_cfg->host);
6081 pci_disable_device(pdev);
6082 LEAVE;
6083}
6084
6085/**
6086 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
6087 * @ioa_cfg: ioa config struct
6088 *
6089 * Return value:
6090 * 0 on success / -ENOMEM on allocation failure
6091 **/
6092static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
6093{
6094 struct ipr_cmnd *ipr_cmd;
6095 struct ipr_ioarcb *ioarcb;
6096 dma_addr_t dma_addr;
6097 int i;
6098
6099 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
6100 sizeof(struct ipr_cmnd), 8, 0);
6101
6102 if (!ioa_cfg->ipr_cmd_pool)
6103 return -ENOMEM;
6104
6105 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
6106 ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);
6107
6108 if (!ipr_cmd) {
6109 ipr_free_cmd_blks(ioa_cfg);
6110 return -ENOMEM;
6111 }
6112
6113 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
6114 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
6115 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
6116
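		/*
		 * The IOARCB, IOADL, and IOASA all live within the same command
		 * block, so their bus addresses are fixed offsets from dma_addr.
		 */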
6117 ioarcb = &ipr_cmd->ioarcb;
6118 ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
6119 ioarcb->host_response_handle = cpu_to_be32(i << 2);
6120 ioarcb->write_ioadl_addr =
6121 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
6122 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6123 ioarcb->ioasa_host_pci_addr =
6124 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
6125 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
6126 ipr_cmd->cmd_index = i;
6127 ipr_cmd->ioa_cfg = ioa_cfg;
6128 ipr_cmd->sense_buffer_dma = dma_addr +
6129 offsetof(struct ipr_cmnd, sense_buffer);
6130
6131 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6132 }
6133
6134 return 0;
6135}
6136
6137/**
6138 * ipr_alloc_mem - Allocate memory for an adapter
6139 * @ioa_cfg: ioa config struct
6140 *
6141 * Return value:
6142 * 0 on success / non-zero for error
6143 **/
6144static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
6145{
6146 struct pci_dev *pdev = ioa_cfg->pdev;
6147 int i, rc = -ENOMEM;
6148
6149 ENTER;
0bc42e35 6150 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
6151 IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
6152
6153 if (!ioa_cfg->res_entries)
6154 goto out;
6155
6156 for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
6157 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
6158
6159 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
6160 sizeof(struct ipr_misc_cbs),
6161 &ioa_cfg->vpd_cbs_dma);
6162
6163 if (!ioa_cfg->vpd_cbs)
6164 goto out_free_res_entries;
6165
6166 if (ipr_alloc_cmd_blks(ioa_cfg))
6167 goto out_free_vpd_cbs;
6168
6169 ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
6170 sizeof(u32) * IPR_NUM_CMD_BLKS,
6171 &ioa_cfg->host_rrq_dma);
6172
6173 if (!ioa_cfg->host_rrq)
6174 goto out_ipr_free_cmd_blocks;
6175
6176 ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
6177 sizeof(struct ipr_config_table),
6178 &ioa_cfg->cfg_table_dma);
6179
6180 if (!ioa_cfg->cfg_table)
6181 goto out_free_host_rrq;
6182
6183 for (i = 0; i < IPR_NUM_HCAMS; i++) {
6184 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
6185 sizeof(struct ipr_hostrcb),
6186 &ioa_cfg->hostrcb_dma[i]);
6187
6188 if (!ioa_cfg->hostrcb[i])
6189 goto out_free_hostrcb_dma;
6190
6191 ioa_cfg->hostrcb[i]->hostrcb_dma =
6192 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
6193 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
6194 }
6195
0bc42e35 6196 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
6197 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
6198
6199 if (!ioa_cfg->trace)
6200 goto out_free_hostrcb_dma;
6201
6202 rc = 0;
6203out:
6204 LEAVE;
6205 return rc;
6206
6207out_free_hostrcb_dma:
6208 while (i-- > 0) {
6209 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
6210 ioa_cfg->hostrcb[i],
6211 ioa_cfg->hostrcb_dma[i]);
6212 }
6213 pci_free_consistent(pdev, sizeof(struct ipr_config_table),
6214 ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
6215out_free_host_rrq:
6216 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
6217 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
6218out_ipr_free_cmd_blocks:
6219 ipr_free_cmd_blks(ioa_cfg);
6220out_free_vpd_cbs:
6221 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
6222 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
6223out_free_res_entries:
6224 kfree(ioa_cfg->res_entries);
6225 goto out;
6226}
6227
6228/**
6229 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
6230 * @ioa_cfg: ioa config struct
6231 *
6232 * Return value:
6233 * none
6234 **/
6235static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
6236{
6237 int i;
6238
6239 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6240 ioa_cfg->bus_attr[i].bus = i;
6241 ioa_cfg->bus_attr[i].qas_enabled = 0;
6242 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
6243 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
6244 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
6245 else
6246 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
6247 }
6248}
6249
6250/**
6251 * ipr_init_ioa_cfg - Initialize IOA config struct
6252 * @ioa_cfg: ioa config struct
6253 * @host: scsi host struct
6254 * @pdev: PCI dev struct
6255 *
6256 * Return value:
6257 * none
6258 **/
6259static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
6260 struct Scsi_Host *host, struct pci_dev *pdev)
6261{
6262 const struct ipr_interrupt_offsets *p;
6263 struct ipr_interrupts *t;
6264 void __iomem *base;
6265
6266 ioa_cfg->host = host;
6267 ioa_cfg->pdev = pdev;
6268 ioa_cfg->log_level = ipr_log_level;
3d1d0da6 6269 ioa_cfg->doorbell = IPR_DOORBELL;
32d29776 6270 if (!ipr_auto_create)
6271 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6272 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
6273 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
6274 sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
6275 sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
6276 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
6277 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
6278 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
6279 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
6280
6281 INIT_LIST_HEAD(&ioa_cfg->free_q);
6282 INIT_LIST_HEAD(&ioa_cfg->pending_q);
6283 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
6284 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
6285 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
6286 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
6287 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
6288 init_waitqueue_head(&ioa_cfg->reset_wait_q);
6289 ioa_cfg->sdt_state = INACTIVE;
62275040 6290 if (ipr_enable_cache)
6291 ioa_cfg->cache_state = CACHE_ENABLED;
6292 else
6293 ioa_cfg->cache_state = CACHE_DISABLED;
6294
6295 ipr_initialize_bus_attr(ioa_cfg);
6296
6297 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
6298 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
6299 host->max_channel = IPR_MAX_BUS_TO_SCAN;
6300 host->unique_id = host->host_no;
6301 host->max_cmd_len = IPR_MAX_CDB_LEN;
6302 pci_set_drvdata(pdev, ioa_cfg);
6303
6304 p = &ioa_cfg->chip_cfg->regs;
6305 t = &ioa_cfg->regs;
6306 base = ioa_cfg->hdw_dma_regs;
6307
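	/*
	 * Translate the chip-specific register offsets into mapped addresses
	 * used by the interrupt and reset paths.
	 */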
6308 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
6309 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
6310 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
6311 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
6312 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
6313 t->ioarrin_reg = base + p->ioarrin_reg;
6314 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
6315 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
6316 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
6317}
6318
6319/**
6320 * ipr_get_chip_cfg - Find adapter chip configuration
6321 * @dev_id: PCI device id struct
6322 *
6323 * Return value:
6324 * ptr to chip config on success / NULL on failure
6325 **/
6326static const struct ipr_chip_cfg_t * __devinit
6327ipr_get_chip_cfg(const struct pci_device_id *dev_id)
6328{
6329 int i;
6330
6331 if (dev_id->driver_data)
6332 return (const struct ipr_chip_cfg_t *)dev_id->driver_data;
6333
6334 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
6335 if (ipr_chip[i].vendor == dev_id->vendor &&
6336 ipr_chip[i].device == dev_id->device)
6337 return ipr_chip[i].cfg;
6338 return NULL;
6339}
6340
6341/**
6342 * ipr_probe_ioa - Allocates memory and does first stage of initialization
6343 * @pdev: PCI device struct
6344 * @dev_id: PCI device id struct
6345 *
6346 * Return value:
6347 * 0 on success / non-zero on failure
6348 **/
6349static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
6350 const struct pci_device_id *dev_id)
6351{
6352 struct ipr_ioa_cfg *ioa_cfg;
6353 struct Scsi_Host *host;
6354 unsigned long ipr_regs_pci;
6355 void __iomem *ipr_regs;
6356 u32 rc = PCIBIOS_SUCCESSFUL;
ce155cce 6357 volatile u32 mask, uproc;
6358
6359 ENTER;
6360
6361 if ((rc = pci_enable_device(pdev))) {
6362 dev_err(&pdev->dev, "Cannot enable adapter\n");
6363 goto out;
6364 }
6365
6366 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
6367
6368 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
6369
6370 if (!host) {
6371 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
6372 rc = -ENOMEM;
6373 goto out_disable;
6374 }
6375
6376 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
6377 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
6378
6379 ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
6380
6381 if (!ioa_cfg->chip_cfg) {
6382 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
6383 dev_id->vendor, dev_id->device);
6384 goto out_scsi_host_put;
6385 }
6386
6387 ipr_regs_pci = pci_resource_start(pdev, 0);
6388
6389 rc = pci_request_regions(pdev, IPR_NAME);
6390 if (rc < 0) {
6391 dev_err(&pdev->dev,
6392 "Couldn't register memory range of registers\n");
6393 goto out_scsi_host_put;
6394 }
6395
6396 ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));
6397
6398 if (!ipr_regs) {
6399 dev_err(&pdev->dev,
6400 "Couldn't map memory range of registers\n");
6401 rc = -ENOMEM;
6402 goto out_release_regions;
6403 }
6404
6405 ioa_cfg->hdw_dma_regs = ipr_regs;
6406 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
6407 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
6408
6409 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
6410
6411 pci_set_master(pdev);
6412
6413 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
6414 if (rc < 0) {
6415 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
6416 goto cleanup_nomem;
6417 }
6418
6419 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
6420 ioa_cfg->chip_cfg->cache_line_size);
6421
6422 if (rc != PCIBIOS_SUCCESSFUL) {
6423 dev_err(&pdev->dev, "Write of cache line size failed\n");
6424 rc = -EIO;
6425 goto cleanup_nomem;
6426 }
6427
6428 /* Save away PCI config space for use following IOA reset */
6429 rc = pci_save_state(pdev);
6430
6431 if (rc != PCIBIOS_SUCCESSFUL) {
6432 dev_err(&pdev->dev, "Failed to save PCI config space\n");
6433 rc = -EIO;
6434 goto cleanup_nomem;
6435 }
6436
6437 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
6438 goto cleanup_nomem;
6439
6440 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
6441 goto cleanup_nomem;
6442
6443 rc = ipr_alloc_mem(ioa_cfg);
6444 if (rc < 0) {
6445 dev_err(&pdev->dev,
6446 "Couldn't allocate enough memory for device driver!\n");
6447 goto cleanup_nomem;
6448 }
6449
ce155cce 6450 /*
6451 * If HRRQ updated interrupt is not masked, or reset alert is set,
6452 * the card is in an unknown state and needs a hard reset
6453 */
6454 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6455 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
6456 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
6457 ioa_cfg->needs_hard_reset = 1;
6458
1da177e4 6459 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
1d6f359a 6460 rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, ioa_cfg);
6461
6462 if (rc) {
6463 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
6464 pdev->irq, rc);
6465 goto cleanup_nolog;
6466 }
6467
6468 spin_lock(&ipr_driver_lock);
6469 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
6470 spin_unlock(&ipr_driver_lock);
6471
6472 LEAVE;
6473out:
6474 return rc;
6475
6476cleanup_nolog:
6477 ipr_free_mem(ioa_cfg);
6478cleanup_nomem:
6479 iounmap(ipr_regs);
6480out_release_regions:
6481 pci_release_regions(pdev);
6482out_scsi_host_put:
6483 scsi_host_put(host);
6484out_disable:
6485 pci_disable_device(pdev);
6486 goto out;
6487}
6488
6489/**
6490 * ipr_scan_vsets - Scans for VSET devices
6491 * @ioa_cfg: ioa config struct
6492 *
6493 * Description: Since the VSET resources do not follow SAM in that we can have
6494 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
6495 *
6496 * Return value:
6497 * none
6498 **/
6499static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
6500{
6501 int target, lun;
6502
6503 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
6504 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
6505 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
6506}
6507
6508/**
6509 * ipr_initiate_ioa_bringdown - Bring down an adapter
6510 * @ioa_cfg: ioa config struct
6511 * @shutdown_type: shutdown type
6512 *
6513 * Description: This function will initiate bringing down the adapter.
6514 * This consists of issuing an IOA shutdown to the adapter
6515 * to flush the cache, and running BIST.
6516 * If the caller needs to wait on the completion of the reset,
6517 * the caller must sleep on the reset_wait_q.
6518 *
6519 * Return value:
6520 * none
6521 **/
6522static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
6523 enum ipr_shutdown_type shutdown_type)
6524{
6525 ENTER;
6526 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
6527 ioa_cfg->sdt_state = ABORT_DUMP;
6528 ioa_cfg->reset_retries = 0;
6529 ioa_cfg->in_ioa_bringdown = 1;
6530 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
6531 LEAVE;
6532}
6533
6534/**
6535 * __ipr_remove - Remove a single adapter
6536 * @pdev: pci device struct
6537 *
6538 * Adapter hot plug remove entry point.
6539 *
6540 * Return value:
6541 * none
6542 **/
6543static void __ipr_remove(struct pci_dev *pdev)
6544{
6545 unsigned long host_lock_flags = 0;
6546 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6547 ENTER;
6548
6549 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6550 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
6551
6552 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6553 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5cbf5eae 6554 flush_scheduled_work();
6555 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6556
6557 spin_lock(&ipr_driver_lock);
6558 list_del(&ioa_cfg->queue);
6559 spin_unlock(&ipr_driver_lock);
6560
6561 if (ioa_cfg->sdt_state == ABORT_DUMP)
6562 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
6563 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6564
6565 ipr_free_all_resources(ioa_cfg);
6566
6567 LEAVE;
6568}
6569
6570/**
6571 * ipr_remove - IOA hot plug remove entry point
6572 * @pdev: pci device struct
6573 *
6574 * Adapter hot plug remove entry point.
6575 *
6576 * Return value:
6577 * none
6578 **/
6579static void ipr_remove(struct pci_dev *pdev)
6580{
6581 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6582
6583 ENTER;
6584
6585 ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6586 &ipr_trace_attr);
6587 ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
6588 &ipr_dump_attr);
6589 scsi_remove_host(ioa_cfg->host);
6590
6591 __ipr_remove(pdev);
6592
6593 LEAVE;
6594}
6595
6596/**
6597 * ipr_probe - Adapter hot plug add entry point
6598 *
6599 * Return value:
6600 * 0 on success / non-zero on failure
6601 **/
6602static int __devinit ipr_probe(struct pci_dev *pdev,
6603 const struct pci_device_id *dev_id)
6604{
6605 struct ipr_ioa_cfg *ioa_cfg;
6606 int rc;
6607
6608 rc = ipr_probe_ioa(pdev, dev_id);
6609
6610 if (rc)
6611 return rc;
6612
6613 ioa_cfg = pci_get_drvdata(pdev);
6614 rc = ipr_probe_ioa_part2(ioa_cfg);
6615
6616 if (rc) {
6617 __ipr_remove(pdev);
6618 return rc;
6619 }
6620
6621 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
6622
6623 if (rc) {
6624 __ipr_remove(pdev);
6625 return rc;
6626 }
6627
6628 rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6629 &ipr_trace_attr);
6630
6631 if (rc) {
6632 scsi_remove_host(ioa_cfg->host);
6633 __ipr_remove(pdev);
6634 return rc;
6635 }
6636
6637 rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
6638 &ipr_dump_attr);
6639
6640 if (rc) {
6641 ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6642 &ipr_trace_attr);
6643 scsi_remove_host(ioa_cfg->host);
6644 __ipr_remove(pdev);
6645 return rc;
6646 }
6647
6648 scsi_scan_host(ioa_cfg->host);
6649 ipr_scan_vsets(ioa_cfg);
6650 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
6651 ioa_cfg->allow_ml_add_del = 1;
11cd8f12 6652 ioa_cfg->host->max_channel = IPR_VSET_BUS;
6653 schedule_work(&ioa_cfg->work_q);
6654 return 0;
6655}
6656
6657/**
6658 * ipr_shutdown - Shutdown handler.
d18c3db5 6659 * @pdev: pci device struct
6660 *
6661 * This function is invoked upon system shutdown/reboot. It will issue
6662 * an adapter shutdown to the adapter to flush the write cache.
6663 *
6664 * Return value:
6665 * none
6666 **/
d18c3db5 6667static void ipr_shutdown(struct pci_dev *pdev)
1da177e4 6668{
d18c3db5 6669 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6670 unsigned long lock_flags = 0;
6671
6672 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6673 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
6674 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6675 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6676}
6677
6678static struct pci_device_id ipr_pci_table[] __devinitdata = {
6679 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6680 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702,
6681 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6682 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6683 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703,
6684 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6685 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6686 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D,
6687 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6688 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6689 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E,
6690 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6691 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6692 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B,
6693 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6694 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6695 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
6696 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6697 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6698 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A,
6699 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
86f51436 6700 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6701 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B,
6702 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6703 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
6704 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
6705 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6706 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
6707 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
6708 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6709 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
6710 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
6711 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6712 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
6713 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
6714 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6715 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
6716 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
6717 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6718 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
6719 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E,
6720 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
86f51436 6721 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
6722 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F,
6723 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6724 { }
6725};
6726MODULE_DEVICE_TABLE(pci, ipr_pci_table);
6727
6728static struct pci_error_handlers ipr_err_handler = {
6729 .error_detected = ipr_pci_error_detected,
6730 .slot_reset = ipr_pci_slot_reset,
6731};
6732
6733static struct pci_driver ipr_driver = {
6734 .name = IPR_NAME,
6735 .id_table = ipr_pci_table,
6736 .probe = ipr_probe,
6737 .remove = ipr_remove,
d18c3db5 6738 .shutdown = ipr_shutdown,
f8a88b19 6739 .err_handler = &ipr_err_handler,
6740};
6741
6742/**
6743 * ipr_init - Module entry point
6744 *
6745 * Return value:
6746 * 0 on success / negative value on failure
6747 **/
6748static int __init ipr_init(void)
6749{
6750 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
6751 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
6752
6753 return pci_module_init(&ipr_driver);
6754}
6755
6756/**
6757 * ipr_exit - Module unload
6758 *
6759 * Module unload entry point.
6760 *
6761 * Return value:
6762 * none
6763 **/
6764static void __exit ipr_exit(void)
6765{
6766 pci_unregister_driver(&ipr_driver);
6767}
6768
6769module_init(ipr_init);
6770module_exit(ipr_exit);