[SCSI] ipr: Add some hardware defined types for SATA
[deliverable/linux.git] / drivers / scsi / ipr.c
CommitLineData
1da177e4
LT
1/*
2 * ipr.c -- driver for IBM Power Linux RAID adapters
3 *
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5 *
6 * Copyright (C) 2003, 2004 IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24/*
25 * Notes:
26 *
27 * This driver is used to control the following SCSI adapters:
28 *
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30 *
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
35 *
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
43 * - Hot spare
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
47 * by adding disks
48 *
49 * Driver Features:
50 * - Tagged command queuing
51 * - Adapter microcode download
52 * - PCI hot plug
53 * - SCSI device hot plug
54 *
55 */
56
1da177e4
LT
57#include <linux/fs.h>
58#include <linux/init.h>
59#include <linux/types.h>
60#include <linux/errno.h>
61#include <linux/kernel.h>
62#include <linux/ioport.h>
63#include <linux/delay.h>
64#include <linux/pci.h>
65#include <linux/wait.h>
66#include <linux/spinlock.h>
67#include <linux/sched.h>
68#include <linux/interrupt.h>
69#include <linux/blkdev.h>
70#include <linux/firmware.h>
71#include <linux/module.h>
72#include <linux/moduleparam.h>
73#include <asm/io.h>
74#include <asm/irq.h>
75#include <asm/processor.h>
76#include <scsi/scsi.h>
77#include <scsi/scsi_host.h>
78#include <scsi/scsi_tcq.h>
79#include <scsi/scsi_eh.h>
80#include <scsi/scsi_cmnd.h>
1da177e4
LT
81#include "ipr.h"
82
83/*
84 * Global Data
85 */
86static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
87static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
88static unsigned int ipr_max_speed = 1;
89static int ipr_testmode = 0;
90static unsigned int ipr_fastfail = 0;
91static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT;
62275040 92static unsigned int ipr_enable_cache = 1;
d3c74871 93static unsigned int ipr_debug = 0;
32d29776 94static int ipr_auto_create = 1;
1da177e4
LT
95static DEFINE_SPINLOCK(ipr_driver_lock);
96
97/* This table describes the differences between DMA controller chips */
98static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
86f51436 99 { /* Gemstone, Citrine, and Obsidian */
1da177e4
LT
100 .mailbox = 0x0042C,
101 .cache_line_size = 0x20,
102 {
103 .set_interrupt_mask_reg = 0x0022C,
104 .clr_interrupt_mask_reg = 0x00230,
105 .sense_interrupt_mask_reg = 0x0022C,
106 .clr_interrupt_reg = 0x00228,
107 .sense_interrupt_reg = 0x00224,
108 .ioarrin_reg = 0x00404,
109 .sense_uproc_interrupt_reg = 0x00214,
110 .set_uproc_interrupt_reg = 0x00214,
111 .clr_uproc_interrupt_reg = 0x00218
112 }
113 },
114 { /* Snipe and Scamp */
115 .mailbox = 0x0052C,
116 .cache_line_size = 0x20,
117 {
118 .set_interrupt_mask_reg = 0x00288,
119 .clr_interrupt_mask_reg = 0x0028C,
120 .sense_interrupt_mask_reg = 0x00288,
121 .clr_interrupt_reg = 0x00284,
122 .sense_interrupt_reg = 0x00280,
123 .ioarrin_reg = 0x00504,
124 .sense_uproc_interrupt_reg = 0x00290,
125 .set_uproc_interrupt_reg = 0x00290,
126 .clr_uproc_interrupt_reg = 0x00294
127 }
128 },
129};
130
131static const struct ipr_chip_t ipr_chip[] = {
132 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
133 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
86f51436 134 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
135 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
1da177e4
LT
136 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
137 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
138};
139
140static int ipr_max_bus_speeds [] = {
141 IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
142};
143
144MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
145MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
146module_param_named(max_speed, ipr_max_speed, uint, 0);
147MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
148module_param_named(log_level, ipr_log_level, uint, 0);
149MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
150module_param_named(testmode, ipr_testmode, int, 0);
151MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
152module_param_named(fastfail, ipr_fastfail, int, 0);
153MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
154module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
155MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
62275040 156module_param_named(enable_cache, ipr_enable_cache, int, 0);
157MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
d3c74871 158module_param_named(debug, ipr_debug, int, 0);
159MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
32d29776 160module_param_named(auto_create, ipr_auto_create, int, 0);
161MODULE_PARM_DESC(auto_create, "Auto-create single device RAID 0 arrays when initialized (default: 1)");
1da177e4
LT
162MODULE_LICENSE("GPL");
163MODULE_VERSION(IPR_DRIVER_VERSION);
164
1da177e4
LT
165/* A constant array of IOASCs/URCs/Error Messages */
166static const
167struct ipr_error_table_t ipr_error_table[] = {
168 {0x00000000, 1, 1,
169 "8155: An unknown error was received"},
170 {0x00330000, 0, 0,
171 "Soft underlength error"},
172 {0x005A0000, 0, 0,
173 "Command to be cancelled not found"},
174 {0x00808000, 0, 0,
175 "Qualified success"},
176 {0x01080000, 1, 1,
177 "FFFE: Soft device bus error recovered by the IOA"},
178 {0x01170600, 0, 1,
179 "FFF9: Device sector reassign successful"},
180 {0x01170900, 0, 1,
181 "FFF7: Media error recovered by device rewrite procedures"},
182 {0x01180200, 0, 1,
183 "7001: IOA sector reassignment successful"},
184 {0x01180500, 0, 1,
185 "FFF9: Soft media error. Sector reassignment recommended"},
186 {0x01180600, 0, 1,
187 "FFF7: Media error recovered by IOA rewrite procedures"},
188 {0x01418000, 0, 1,
189 "FF3D: Soft PCI bus error recovered by the IOA"},
190 {0x01440000, 1, 1,
191 "FFF6: Device hardware error recovered by the IOA"},
192 {0x01448100, 0, 1,
193 "FFF6: Device hardware error recovered by the device"},
194 {0x01448200, 1, 1,
195 "FF3D: Soft IOA error recovered by the IOA"},
196 {0x01448300, 0, 1,
197 "FFFA: Undefined device response recovered by the IOA"},
198 {0x014A0000, 1, 1,
199 "FFF6: Device bus error, message or command phase"},
200 {0x015D0000, 0, 1,
201 "FFF6: Failure prediction threshold exceeded"},
202 {0x015D9200, 0, 1,
203 "8009: Impending cache battery pack failure"},
204 {0x02040400, 0, 0,
205 "34FF: Disk device format in progress"},
206 {0x023F0000, 0, 0,
207 "Synchronization required"},
208 {0x024E0000, 0, 0,
209 "No ready, IOA shutdown"},
210 {0x025A0000, 0, 0,
211 "Not ready, IOA has been shutdown"},
212 {0x02670100, 0, 1,
213 "3020: Storage subsystem configuration error"},
214 {0x03110B00, 0, 0,
215 "FFF5: Medium error, data unreadable, recommend reassign"},
216 {0x03110C00, 0, 0,
217 "7000: Medium error, data unreadable, do not reassign"},
218 {0x03310000, 0, 1,
219 "FFF3: Disk media format bad"},
220 {0x04050000, 0, 1,
221 "3002: Addressed device failed to respond to selection"},
222 {0x04080000, 1, 1,
223 "3100: Device bus error"},
224 {0x04080100, 0, 1,
225 "3109: IOA timed out a device command"},
226 {0x04088000, 0, 0,
227 "3120: SCSI bus is not operational"},
228 {0x04118000, 0, 1,
229 "9000: IOA reserved area data check"},
230 {0x04118100, 0, 1,
231 "9001: IOA reserved area invalid data pattern"},
232 {0x04118200, 0, 1,
233 "9002: IOA reserved area LRC error"},
234 {0x04320000, 0, 1,
235 "102E: Out of alternate sectors for disk storage"},
236 {0x04330000, 1, 1,
237 "FFF4: Data transfer underlength error"},
238 {0x04338000, 1, 1,
239 "FFF4: Data transfer overlength error"},
240 {0x043E0100, 0, 1,
241 "3400: Logical unit failure"},
242 {0x04408500, 0, 1,
243 "FFF4: Device microcode is corrupt"},
244 {0x04418000, 1, 1,
245 "8150: PCI bus error"},
246 {0x04430000, 1, 0,
247 "Unsupported device bus message received"},
248 {0x04440000, 1, 1,
249 "FFF4: Disk device problem"},
250 {0x04448200, 1, 1,
251 "8150: Permanent IOA failure"},
252 {0x04448300, 0, 1,
253 "3010: Disk device returned wrong response to IOA"},
254 {0x04448400, 0, 1,
255 "8151: IOA microcode error"},
256 {0x04448500, 0, 0,
257 "Device bus status error"},
258 {0x04448600, 0, 1,
259 "8157: IOA error requiring IOA reset to recover"},
260 {0x04490000, 0, 0,
261 "Message reject received from the device"},
262 {0x04449200, 0, 1,
263 "8008: A permanent cache battery pack failure occurred"},
264 {0x0444A000, 0, 1,
265 "9090: Disk unit has been modified after the last known status"},
266 {0x0444A200, 0, 1,
267 "9081: IOA detected device error"},
268 {0x0444A300, 0, 1,
269 "9082: IOA detected device error"},
270 {0x044A0000, 1, 1,
271 "3110: Device bus error, message or command phase"},
272 {0x04670400, 0, 1,
273 "9091: Incorrect hardware configuration change has been detected"},
b0df54bb 274 {0x04678000, 0, 1,
275 "9073: Invalid multi-adapter configuration"},
1da177e4
LT
276 {0x046E0000, 0, 1,
277 "FFF4: Command to logical unit failed"},
278 {0x05240000, 1, 0,
279 "Illegal request, invalid request type or request packet"},
280 {0x05250000, 0, 0,
281 "Illegal request, invalid resource handle"},
b0df54bb 282 {0x05258000, 0, 0,
283 "Illegal request, commands not allowed to this device"},
284 {0x05258100, 0, 0,
285 "Illegal request, command not allowed to a secondary adapter"},
1da177e4
LT
286 {0x05260000, 0, 0,
287 "Illegal request, invalid field in parameter list"},
288 {0x05260100, 0, 0,
289 "Illegal request, parameter not supported"},
290 {0x05260200, 0, 0,
291 "Illegal request, parameter value invalid"},
292 {0x052C0000, 0, 0,
293 "Illegal request, command sequence error"},
b0df54bb 294 {0x052C8000, 1, 0,
295 "Illegal request, dual adapter support not enabled"},
1da177e4
LT
296 {0x06040500, 0, 1,
297 "9031: Array protection temporarily suspended, protection resuming"},
298 {0x06040600, 0, 1,
299 "9040: Array protection temporarily suspended, protection resuming"},
300 {0x06290000, 0, 1,
301 "FFFB: SCSI bus was reset"},
302 {0x06290500, 0, 0,
303 "FFFE: SCSI bus transition to single ended"},
304 {0x06290600, 0, 0,
305 "FFFE: SCSI bus transition to LVD"},
306 {0x06298000, 0, 1,
307 "FFFB: SCSI bus was reset by another initiator"},
308 {0x063F0300, 0, 1,
309 "3029: A device replacement has occurred"},
310 {0x064C8000, 0, 1,
311 "9051: IOA cache data exists for a missing or failed device"},
b0df54bb 312 {0x064C8100, 0, 1,
313 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
1da177e4
LT
314 {0x06670100, 0, 1,
315 "9025: Disk unit is not supported at its physical location"},
316 {0x06670600, 0, 1,
317 "3020: IOA detected a SCSI bus configuration error"},
318 {0x06678000, 0, 1,
319 "3150: SCSI bus configuration error"},
b0df54bb 320 {0x06678100, 0, 1,
321 "9074: Asymmetric advanced function disk configuration"},
1da177e4
LT
322 {0x06690200, 0, 1,
323 "9041: Array protection temporarily suspended"},
324 {0x06698200, 0, 1,
325 "9042: Corrupt array parity detected on specified device"},
326 {0x066B0200, 0, 1,
327 "9030: Array no longer protected due to missing or failed disk unit"},
b0df54bb 328 {0x066B8000, 0, 1,
329 "9071: Link operational transition"},
330 {0x066B8100, 0, 1,
331 "9072: Link not operational transition"},
1da177e4
LT
332 {0x066B8200, 0, 1,
333 "9032: Array exposed but still protected"},
334 {0x07270000, 0, 0,
335 "Failure due to other device"},
336 {0x07278000, 0, 1,
337 "9008: IOA does not support functions expected by devices"},
338 {0x07278100, 0, 1,
339 "9010: Cache data associated with attached devices cannot be found"},
340 {0x07278200, 0, 1,
341 "9011: Cache data belongs to devices other than those attached"},
342 {0x07278400, 0, 1,
343 "9020: Array missing 2 or more devices with only 1 device present"},
344 {0x07278500, 0, 1,
345 "9021: Array missing 2 or more devices with 2 or more devices present"},
346 {0x07278600, 0, 1,
347 "9022: Exposed array is missing a required device"},
348 {0x07278700, 0, 1,
349 "9023: Array member(s) not at required physical locations"},
350 {0x07278800, 0, 1,
351 "9024: Array not functional due to present hardware configuration"},
352 {0x07278900, 0, 1,
353 "9026: Array not functional due to present hardware configuration"},
354 {0x07278A00, 0, 1,
355 "9027: Array is missing a device and parity is out of sync"},
356 {0x07278B00, 0, 1,
357 "9028: Maximum number of arrays already exist"},
358 {0x07278C00, 0, 1,
359 "9050: Required cache data cannot be located for a disk unit"},
360 {0x07278D00, 0, 1,
361 "9052: Cache data exists for a device that has been modified"},
362 {0x07278F00, 0, 1,
363 "9054: IOA resources not available due to previous problems"},
364 {0x07279100, 0, 1,
365 "9092: Disk unit requires initialization before use"},
366 {0x07279200, 0, 1,
367 "9029: Incorrect hardware configuration change has been detected"},
368 {0x07279600, 0, 1,
369 "9060: One or more disk pairs are missing from an array"},
370 {0x07279700, 0, 1,
371 "9061: One or more disks are missing from an array"},
372 {0x07279800, 0, 1,
373 "9062: One or more disks are missing from an array"},
374 {0x07279900, 0, 1,
375 "9063: Maximum number of functional arrays has been exceeded"},
376 {0x0B260000, 0, 0,
377 "Aborted command, invalid descriptor"},
378 {0x0B5A0000, 0, 0,
379 "Command terminated by host"}
380};
381
382static const struct ipr_ses_table_entry ipr_ses_table[] = {
383 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
384 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
385 { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
386 { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
387 { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
388 { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
389 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
390 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
391 { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
392 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
393 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
394 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
395 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
396};
397
398/*
399 * Function Prototypes
400 */
401static int ipr_reset_alert(struct ipr_cmnd *);
402static void ipr_process_ccn(struct ipr_cmnd *);
403static void ipr_process_error(struct ipr_cmnd *);
404static void ipr_reset_ioa_job(struct ipr_cmnd *);
405static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
406 enum ipr_shutdown_type);
407
408#ifdef CONFIG_SCSI_IPR_TRACE
409/**
410 * ipr_trc_hook - Add a trace entry to the driver trace
411 * @ipr_cmd: ipr command struct
412 * @type: trace type
413 * @add_data: additional data
414 *
415 * Return value:
416 * none
417 **/
418static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
419 u8 type, u32 add_data)
420{
421 struct ipr_trace_entry *trace_entry;
422 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
423
424 trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
425 trace_entry->time = jiffies;
426 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
427 trace_entry->type = type;
428 trace_entry->cmd_index = ipr_cmd->cmd_index;
429 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
430 trace_entry->u.add_data = add_data;
431}
432#else
433#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
434#endif
435
436/**
437 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
438 * @ipr_cmd: ipr command struct
439 *
440 * Return value:
441 * none
442 **/
443static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
444{
445 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
446 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
447
448 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
449 ioarcb->write_data_transfer_length = 0;
450 ioarcb->read_data_transfer_length = 0;
451 ioarcb->write_ioadl_len = 0;
452 ioarcb->read_ioadl_len = 0;
453 ioasa->ioasc = 0;
454 ioasa->residual_data_len = 0;
455
456 ipr_cmd->scsi_cmd = NULL;
457 ipr_cmd->sense_buffer[0] = 0;
458 ipr_cmd->dma_use_sg = 0;
459}
460
461/**
462 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
463 * @ipr_cmd: ipr command struct
464 *
465 * Return value:
466 * none
467 **/
468static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
469{
470 ipr_reinit_ipr_cmnd(ipr_cmd);
471 ipr_cmd->u.scratch = 0;
472 ipr_cmd->sibling = NULL;
473 init_timer(&ipr_cmd->timer);
474}
475
476/**
477 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
478 * @ioa_cfg: ioa config struct
479 *
480 * Return value:
481 * pointer to ipr command struct
482 **/
483static
484struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
485{
486 struct ipr_cmnd *ipr_cmd;
487
488 ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
489 list_del(&ipr_cmd->queue);
490 ipr_init_ipr_cmnd(ipr_cmd);
491
492 return ipr_cmd;
493}
494
495/**
496 * ipr_unmap_sglist - Unmap scatterlist if mapped
497 * @ioa_cfg: ioa config struct
498 * @ipr_cmd: ipr command struct
499 *
500 * Return value:
501 * nothing
502 **/
503static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
504 struct ipr_cmnd *ipr_cmd)
505{
506 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
507
508 if (ipr_cmd->dma_use_sg) {
509 if (scsi_cmd->use_sg > 0) {
510 pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
511 scsi_cmd->use_sg,
512 scsi_cmd->sc_data_direction);
513 } else {
514 pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
515 scsi_cmd->request_bufflen,
516 scsi_cmd->sc_data_direction);
517 }
518 }
519}
520
521/**
522 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
523 * @ioa_cfg: ioa config struct
524 * @clr_ints: interrupts to clear
525 *
526 * This function masks all interrupts on the adapter, then clears the
527 * interrupts specified in the mask
528 *
529 * Return value:
530 * none
531 **/
532static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
533 u32 clr_ints)
534{
535 volatile u32 int_reg;
536
537 /* Stop new interrupts */
538 ioa_cfg->allow_interrupts = 0;
539
540 /* Set interrupt mask to stop all new interrupts */
541 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
542
543 /* Clear any pending interrupts */
544 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
545 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
546}
547
548/**
549 * ipr_save_pcix_cmd_reg - Save PCI-X command register
550 * @ioa_cfg: ioa config struct
551 *
552 * Return value:
553 * 0 on success / -EIO on failure
554 **/
555static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
556{
557 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
558
559 if (pcix_cmd_reg == 0) {
560 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
561 return -EIO;
562 }
563
564 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
565 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
566 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
567 return -EIO;
568 }
569
570 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
571 return 0;
572}
573
574/**
575 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
576 * @ioa_cfg: ioa config struct
577 *
578 * Return value:
579 * 0 on success / -EIO on failure
580 **/
581static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
582{
583 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
584
585 if (pcix_cmd_reg) {
586 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
587 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
588 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
589 return -EIO;
590 }
591 } else {
592 dev_err(&ioa_cfg->pdev->dev,
593 "Failed to setup PCI-X command register\n");
594 return -EIO;
595 }
596
597 return 0;
598}
599
600/**
601 * ipr_scsi_eh_done - mid-layer done function for aborted ops
602 * @ipr_cmd: ipr command struct
603 *
604 * This function is invoked by the interrupt handler for
605 * ops generated by the SCSI mid-layer which are being aborted.
606 *
607 * Return value:
608 * none
609 **/
610static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
611{
612 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
613 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
614
615 scsi_cmd->result |= (DID_ERROR << 16);
616
617 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
618 scsi_cmd->scsi_done(scsi_cmd);
619 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
620}
621
622/**
623 * ipr_fail_all_ops - Fails all outstanding ops.
624 * @ioa_cfg: ioa config struct
625 *
626 * This function fails all outstanding ops.
627 *
628 * Return value:
629 * none
630 **/
631static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
632{
633 struct ipr_cmnd *ipr_cmd, *temp;
634
635 ENTER;
636 list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
637 list_del(&ipr_cmd->queue);
638
639 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
640 ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);
641
642 if (ipr_cmd->scsi_cmd)
643 ipr_cmd->done = ipr_scsi_eh_done;
644
645 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
646 del_timer(&ipr_cmd->timer);
647 ipr_cmd->done(ipr_cmd);
648 }
649
650 LEAVE;
651}
652
653/**
654 * ipr_do_req - Send driver initiated requests.
655 * @ipr_cmd: ipr command struct
656 * @done: done function
657 * @timeout_func: timeout function
658 * @timeout: timeout value
659 *
660 * This function sends the specified command to the adapter with the
661 * timeout given. The done function is invoked on command completion.
662 *
663 * Return value:
664 * none
665 **/
666static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
667 void (*done) (struct ipr_cmnd *),
668 void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
669{
670 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
671
672 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
673
674 ipr_cmd->done = done;
675
676 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
677 ipr_cmd->timer.expires = jiffies + timeout;
678 ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
679
680 add_timer(&ipr_cmd->timer);
681
682 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
683
684 mb();
685 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
686 ioa_cfg->regs.ioarrin_reg);
687}
688
689/**
690 * ipr_internal_cmd_done - Op done function for an internally generated op.
691 * @ipr_cmd: ipr command struct
692 *
693 * This function is the op done function for an internally generated,
694 * blocking op. It simply wakes the sleeping thread.
695 *
696 * Return value:
697 * none
698 **/
699static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
700{
701 if (ipr_cmd->sibling)
702 ipr_cmd->sibling = NULL;
703 else
704 complete(&ipr_cmd->completion);
705}
706
707/**
708 * ipr_send_blocking_cmd - Send command and sleep on its completion.
709 * @ipr_cmd: ipr command struct
710 * @timeout_func: function to invoke if command times out
711 * @timeout: timeout
712 *
713 * Return value:
714 * none
715 **/
716static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
717 void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
718 u32 timeout)
719{
720 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
721
722 init_completion(&ipr_cmd->completion);
723 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
724
725 spin_unlock_irq(ioa_cfg->host->host_lock);
726 wait_for_completion(&ipr_cmd->completion);
727 spin_lock_irq(ioa_cfg->host->host_lock);
728}
729
730/**
731 * ipr_send_hcam - Send an HCAM to the adapter.
732 * @ioa_cfg: ioa config struct
733 * @type: HCAM type
734 * @hostrcb: hostrcb struct
735 *
736 * This function will send a Host Controlled Async command to the adapter.
737 * If HCAMs are currently not allowed to be issued to the adapter, it will
738 * place the hostrcb on the free queue.
739 *
740 * Return value:
741 * none
742 **/
743static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
744 struct ipr_hostrcb *hostrcb)
745{
746 struct ipr_cmnd *ipr_cmd;
747 struct ipr_ioarcb *ioarcb;
748
749 if (ioa_cfg->allow_cmds) {
750 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
751 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
752 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
753
754 ipr_cmd->u.hostrcb = hostrcb;
755 ioarcb = &ipr_cmd->ioarcb;
756
757 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
758 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
759 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
760 ioarcb->cmd_pkt.cdb[1] = type;
761 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
762 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
763
764 ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
765 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
766 ipr_cmd->ioadl[0].flags_and_data_len =
767 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
768 ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);
769
770 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
771 ipr_cmd->done = ipr_process_ccn;
772 else
773 ipr_cmd->done = ipr_process_error;
774
775 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
776
777 mb();
778 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
779 ioa_cfg->regs.ioarrin_reg);
780 } else {
781 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
782 }
783}
784
785/**
786 * ipr_init_res_entry - Initialize a resource entry struct.
787 * @res: resource entry struct
788 *
789 * Return value:
790 * none
791 **/
792static void ipr_init_res_entry(struct ipr_resource_entry *res)
793{
ee0a90fa 794 res->needs_sync_complete = 0;
1da177e4
LT
795 res->in_erp = 0;
796 res->add_to_ml = 0;
797 res->del_from_ml = 0;
798 res->resetting_device = 0;
799 res->sdev = NULL;
800}
801
802/**
803 * ipr_handle_config_change - Handle a config change from the adapter
804 * @ioa_cfg: ioa config struct
805 * @hostrcb: hostrcb
806 *
807 * Return value:
808 * none
809 **/
810static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
811 struct ipr_hostrcb *hostrcb)
812{
813 struct ipr_resource_entry *res = NULL;
814 struct ipr_config_table_entry *cfgte;
815 u32 is_ndn = 1;
816
817 cfgte = &hostrcb->hcam.u.ccn.cfgte;
818
819 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
820 if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
821 sizeof(cfgte->res_addr))) {
822 is_ndn = 0;
823 break;
824 }
825 }
826
827 if (is_ndn) {
828 if (list_empty(&ioa_cfg->free_res_q)) {
829 ipr_send_hcam(ioa_cfg,
830 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
831 hostrcb);
832 return;
833 }
834
835 res = list_entry(ioa_cfg->free_res_q.next,
836 struct ipr_resource_entry, queue);
837
838 list_del(&res->queue);
839 ipr_init_res_entry(res);
840 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
841 }
842
843 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
844
845 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
846 if (res->sdev) {
1da177e4 847 res->del_from_ml = 1;
1121b794 848 res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
1da177e4
LT
849 if (ioa_cfg->allow_ml_add_del)
850 schedule_work(&ioa_cfg->work_q);
851 } else
852 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
853 } else if (!res->sdev) {
854 res->add_to_ml = 1;
855 if (ioa_cfg->allow_ml_add_del)
856 schedule_work(&ioa_cfg->work_q);
857 }
858
859 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
860}
861
862/**
863 * ipr_process_ccn - Op done function for a CCN.
864 * @ipr_cmd: ipr command struct
865 *
866 * This function is the op done function for a configuration
867 * change notification host controlled async from the adapter.
868 *
869 * Return value:
870 * none
871 **/
872static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
873{
874 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
875 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
876 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
877
878 list_del(&hostrcb->queue);
879 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
880
881 if (ioasc) {
882 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
883 dev_err(&ioa_cfg->pdev->dev,
884 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
885
886 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
887 } else {
888 ipr_handle_config_change(ioa_cfg, hostrcb);
889 }
890}
891
892/**
893 * ipr_log_vpd - Log the passed VPD to the error log.
cfc32139 894 * @vpd: vendor/product id/sn struct
1da177e4
LT
895 *
896 * Return value:
897 * none
898 **/
cfc32139 899static void ipr_log_vpd(struct ipr_vpd *vpd)
1da177e4
LT
900{
901 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
902 + IPR_SERIAL_NUM_LEN];
903
cfc32139 904 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
905 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1da177e4
LT
906 IPR_PROD_ID_LEN);
907 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
908 ipr_err("Vendor/Product ID: %s\n", buffer);
909
cfc32139 910 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1da177e4
LT
911 buffer[IPR_SERIAL_NUM_LEN] = '\0';
912 ipr_err(" Serial Number: %s\n", buffer);
913}
914
ee0f05b8 915/**
916 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
917 * @vpd: vendor/product id/sn/wwn struct
918 *
919 * Return value:
920 * none
921 **/
922static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
923{
924 ipr_log_vpd(&vpd->vpd);
925 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
926 be32_to_cpu(vpd->wwid[1]));
927}
928
929/**
930 * ipr_log_enhanced_cache_error - Log a cache error.
931 * @ioa_cfg: ioa config struct
932 * @hostrcb: hostrcb struct
933 *
934 * Return value:
935 * none
936 **/
937static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
938 struct ipr_hostrcb *hostrcb)
939{
940 struct ipr_hostrcb_type_12_error *error =
941 &hostrcb->hcam.u.error.u.type_12_error;
942
943 ipr_err("-----Current Configuration-----\n");
944 ipr_err("Cache Directory Card Information:\n");
945 ipr_log_ext_vpd(&error->ioa_vpd);
946 ipr_err("Adapter Card Information:\n");
947 ipr_log_ext_vpd(&error->cfc_vpd);
948
949 ipr_err("-----Expected Configuration-----\n");
950 ipr_err("Cache Directory Card Information:\n");
951 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
952 ipr_err("Adapter Card Information:\n");
953 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
954
955 ipr_err("Additional IOA Data: %08X %08X %08X\n",
956 be32_to_cpu(error->ioa_data[0]),
957 be32_to_cpu(error->ioa_data[1]),
958 be32_to_cpu(error->ioa_data[2]));
959}
960
1da177e4
LT
961/**
962 * ipr_log_cache_error - Log a cache error.
963 * @ioa_cfg: ioa config struct
964 * @hostrcb: hostrcb struct
965 *
966 * Return value:
967 * none
968 **/
969static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
970 struct ipr_hostrcb *hostrcb)
971{
972 struct ipr_hostrcb_type_02_error *error =
973 &hostrcb->hcam.u.error.u.type_02_error;
974
975 ipr_err("-----Current Configuration-----\n");
976 ipr_err("Cache Directory Card Information:\n");
cfc32139 977 ipr_log_vpd(&error->ioa_vpd);
1da177e4 978 ipr_err("Adapter Card Information:\n");
cfc32139 979 ipr_log_vpd(&error->cfc_vpd);
1da177e4
LT
980
981 ipr_err("-----Expected Configuration-----\n");
982 ipr_err("Cache Directory Card Information:\n");
cfc32139 983 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1da177e4 984 ipr_err("Adapter Card Information:\n");
cfc32139 985 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1da177e4
LT
986
987 ipr_err("Additional IOA Data: %08X %08X %08X\n",
988 be32_to_cpu(error->ioa_data[0]),
989 be32_to_cpu(error->ioa_data[1]),
990 be32_to_cpu(error->ioa_data[2]));
991}
992
ee0f05b8 993/**
994 * ipr_log_enhanced_config_error - Log a configuration error.
995 * @ioa_cfg: ioa config struct
996 * @hostrcb: hostrcb struct
997 *
998 * Return value:
999 * none
1000 **/
1001static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1002 struct ipr_hostrcb *hostrcb)
1003{
1004 int errors_logged, i;
1005 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1006 struct ipr_hostrcb_type_13_error *error;
1007
1008 error = &hostrcb->hcam.u.error.u.type_13_error;
1009 errors_logged = be32_to_cpu(error->errors_logged);
1010
1011 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1012 be32_to_cpu(error->errors_detected), errors_logged);
1013
1014 dev_entry = error->dev;
1015
1016 for (i = 0; i < errors_logged; i++, dev_entry++) {
1017 ipr_err_separator;
1018
1019 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1020 ipr_log_ext_vpd(&dev_entry->vpd);
1021
1022 ipr_err("-----New Device Information-----\n");
1023 ipr_log_ext_vpd(&dev_entry->new_vpd);
1024
1025 ipr_err("Cache Directory Card Information:\n");
1026 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1027
1028 ipr_err("Adapter Card Information:\n");
1029 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1030 }
1031}
1032
1da177e4
LT
1033/**
1034 * ipr_log_config_error - Log a configuration error.
1035 * @ioa_cfg: ioa config struct
1036 * @hostrcb: hostrcb struct
1037 *
1038 * Return value:
1039 * none
1040 **/
1041static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1042 struct ipr_hostrcb *hostrcb)
1043{
1044 int errors_logged, i;
1045 struct ipr_hostrcb_device_data_entry *dev_entry;
1046 struct ipr_hostrcb_type_03_error *error;
1047
1048 error = &hostrcb->hcam.u.error.u.type_03_error;
1049 errors_logged = be32_to_cpu(error->errors_logged);
1050
1051 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1052 be32_to_cpu(error->errors_detected), errors_logged);
1053
cfc32139 1054 dev_entry = error->dev;
1da177e4
LT
1055
1056 for (i = 0; i < errors_logged; i++, dev_entry++) {
1057 ipr_err_separator;
1058
fa15b1f6 1059 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
cfc32139 1060 ipr_log_vpd(&dev_entry->vpd);
1da177e4
LT
1061
1062 ipr_err("-----New Device Information-----\n");
cfc32139 1063 ipr_log_vpd(&dev_entry->new_vpd);
1da177e4
LT
1064
1065 ipr_err("Cache Directory Card Information:\n");
cfc32139 1066 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1da177e4
LT
1067
1068 ipr_err("Adapter Card Information:\n");
cfc32139 1069 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1da177e4
LT
1070
1071 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1072 be32_to_cpu(dev_entry->ioa_data[0]),
1073 be32_to_cpu(dev_entry->ioa_data[1]),
1074 be32_to_cpu(dev_entry->ioa_data[2]),
1075 be32_to_cpu(dev_entry->ioa_data[3]),
1076 be32_to_cpu(dev_entry->ioa_data[4]));
1077 }
1078}
1079
ee0f05b8 1080/**
1081 * ipr_log_enhanced_array_error - Log an array configuration error.
1082 * @ioa_cfg: ioa config struct
1083 * @hostrcb: hostrcb struct
1084 *
1085 * Return value:
1086 * none
1087 **/
1088static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1089 struct ipr_hostrcb *hostrcb)
1090{
1091 int i, num_entries;
1092 struct ipr_hostrcb_type_14_error *error;
1093 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1094 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1095
1096 error = &hostrcb->hcam.u.error.u.type_14_error;
1097
1098 ipr_err_separator;
1099
1100 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1101 error->protection_level,
1102 ioa_cfg->host->host_no,
1103 error->last_func_vset_res_addr.bus,
1104 error->last_func_vset_res_addr.target,
1105 error->last_func_vset_res_addr.lun);
1106
1107 ipr_err_separator;
1108
1109 array_entry = error->array_member;
1110 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1111 sizeof(error->array_member));
1112
1113 for (i = 0; i < num_entries; i++, array_entry++) {
1114 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1115 continue;
1116
1117 if (be32_to_cpu(error->exposed_mode_adn) == i)
1118 ipr_err("Exposed Array Member %d:\n", i);
1119 else
1120 ipr_err("Array Member %d:\n", i);
1121
1122 ipr_log_ext_vpd(&array_entry->vpd);
1123 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1124 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1125 "Expected Location");
1126
1127 ipr_err_separator;
1128 }
1129}
1130
1da177e4
LT
1131/**
1132 * ipr_log_array_error - Log an array configuration error.
1133 * @ioa_cfg: ioa config struct
1134 * @hostrcb: hostrcb struct
1135 *
1136 * Return value:
1137 * none
1138 **/
1139static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1140 struct ipr_hostrcb *hostrcb)
1141{
1142 int i;
1143 struct ipr_hostrcb_type_04_error *error;
1144 struct ipr_hostrcb_array_data_entry *array_entry;
1145 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1146
1147 error = &hostrcb->hcam.u.error.u.type_04_error;
1148
1149 ipr_err_separator;
1150
1151 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1152 error->protection_level,
1153 ioa_cfg->host->host_no,
1154 error->last_func_vset_res_addr.bus,
1155 error->last_func_vset_res_addr.target,
1156 error->last_func_vset_res_addr.lun);
1157
1158 ipr_err_separator;
1159
1160 array_entry = error->array_member;
1161
1162 for (i = 0; i < 18; i++) {
cfc32139 1163 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1da177e4
LT
1164 continue;
1165
fa15b1f6 1166 if (be32_to_cpu(error->exposed_mode_adn) == i)
1da177e4 1167 ipr_err("Exposed Array Member %d:\n", i);
fa15b1f6 1168 else
1da177e4 1169 ipr_err("Array Member %d:\n", i);
1da177e4 1170
cfc32139 1171 ipr_log_vpd(&array_entry->vpd);
1da177e4 1172
fa15b1f6 1173 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1174 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1175 "Expected Location");
1da177e4
LT
1176
1177 ipr_err_separator;
1178
1179 if (i == 9)
1180 array_entry = error->array_member2;
1181 else
1182 array_entry++;
1183 }
1184}
1185
1186/**
b0df54bb 1187 * ipr_log_hex_data - Log additional hex IOA error data.
1188 * @data: IOA error data
1189 * @len: data length
1da177e4
LT
1190 *
1191 * Return value:
1192 * none
1193 **/
b0df54bb 1194static void ipr_log_hex_data(u32 *data, int len)
1da177e4
LT
1195{
1196 int i;
1da177e4 1197
b0df54bb 1198 if (len == 0)
1da177e4
LT
1199 return;
1200
b0df54bb 1201 for (i = 0; i < len / 4; i += 4) {
1da177e4 1202 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
b0df54bb 1203 be32_to_cpu(data[i]),
1204 be32_to_cpu(data[i+1]),
1205 be32_to_cpu(data[i+2]),
1206 be32_to_cpu(data[i+3]));
1da177e4
LT
1207 }
1208}
1209
ee0f05b8 1210/**
1211 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1212 * @ioa_cfg: ioa config struct
1213 * @hostrcb: hostrcb struct
1214 *
1215 * Return value:
1216 * none
1217 **/
1218static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1219 struct ipr_hostrcb *hostrcb)
1220{
1221 struct ipr_hostrcb_type_17_error *error;
1222
1223 error = &hostrcb->hcam.u.error.u.type_17_error;
1224 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1225
1226 ipr_err("%s\n", error->failure_reason);
1227 ipr_err("Remote Adapter VPD:\n");
1228 ipr_log_ext_vpd(&error->vpd);
1229 ipr_log_hex_data(error->data,
1230 be32_to_cpu(hostrcb->hcam.length) -
1231 (offsetof(struct ipr_hostrcb_error, u) +
1232 offsetof(struct ipr_hostrcb_type_17_error, data)));
1233}
1234
b0df54bb 1235/**
1236 * ipr_log_dual_ioa_error - Log a dual adapter error.
1237 * @ioa_cfg: ioa config struct
1238 * @hostrcb: hostrcb struct
1239 *
1240 * Return value:
1241 * none
1242 **/
1243static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1244 struct ipr_hostrcb *hostrcb)
1245{
1246 struct ipr_hostrcb_type_07_error *error;
1247
1248 error = &hostrcb->hcam.u.error.u.type_07_error;
1249 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1250
1251 ipr_err("%s\n", error->failure_reason);
1252 ipr_err("Remote Adapter VPD:\n");
1253 ipr_log_vpd(&error->vpd);
1254 ipr_log_hex_data(error->data,
1255 be32_to_cpu(hostrcb->hcam.length) -
1256 (offsetof(struct ipr_hostrcb_error, u) +
1257 offsetof(struct ipr_hostrcb_type_07_error, data)));
1258}
1259
1260/**
1261 * ipr_log_generic_error - Log an adapter error.
1262 * @ioa_cfg: ioa config struct
1263 * @hostrcb: hostrcb struct
1264 *
1265 * Return value:
1266 * none
1267 **/
1268static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
1269 struct ipr_hostrcb *hostrcb)
1270{
1271 ipr_log_hex_data(hostrcb->hcam.u.raw.data,
1272 be32_to_cpu(hostrcb->hcam.length));
1273}
1274
1da177e4
LT
1275/**
1276 * ipr_get_error - Find the specfied IOASC in the ipr_error_table.
1277 * @ioasc: IOASC
1278 *
1279 * This function will return the index of into the ipr_error_table
1280 * for the specified IOASC. If the IOASC is not in the table,
1281 * 0 will be returned, which points to the entry used for unknown errors.
1282 *
1283 * Return value:
1284 * index into the ipr_error_table
1285 **/
1286static u32 ipr_get_error(u32 ioasc)
1287{
1288 int i;
1289
1290 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
1291 if (ipr_error_table[i].ioasc == ioasc)
1292 return i;
1293
1294 return 0;
1295}
1296
1297/**
1298 * ipr_handle_log_data - Log an adapter error.
1299 * @ioa_cfg: ioa config struct
1300 * @hostrcb: hostrcb struct
1301 *
1302 * This function logs an adapter error to the system.
1303 *
1304 * Return value:
1305 * none
1306 **/
1307static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
1308 struct ipr_hostrcb *hostrcb)
1309{
1310 u32 ioasc;
1311 int error_index;
1312
1313 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
1314 return;
1315
1316 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
1317 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
1318
1319 ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
1320
1321 if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
1322 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
1323 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
1324 scsi_report_bus_reset(ioa_cfg->host,
1325 hostrcb->hcam.u.error.failing_dev_res_addr.bus);
1326 }
1327
1328 error_index = ipr_get_error(ioasc);
1329
1330 if (!ipr_error_table[error_index].log_hcam)
1331 return;
1332
1333 if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
fb3ed3cb
BK
1334 ipr_ra_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
1335 "%s\n", ipr_error_table[error_index].error);
1da177e4
LT
1336 } else {
1337 dev_err(&ioa_cfg->pdev->dev, "%s\n",
1338 ipr_error_table[error_index].error);
1339 }
1340
1341 /* Set indication we have logged an error */
1342 ioa_cfg->errors_logged++;
1343
1344 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
1345 return;
cf852037 1346 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
1347 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
1da177e4
LT
1348
1349 switch (hostrcb->hcam.overlay_id) {
1da177e4
LT
1350 case IPR_HOST_RCB_OVERLAY_ID_2:
1351 ipr_log_cache_error(ioa_cfg, hostrcb);
1352 break;
1353 case IPR_HOST_RCB_OVERLAY_ID_3:
1354 ipr_log_config_error(ioa_cfg, hostrcb);
1355 break;
1356 case IPR_HOST_RCB_OVERLAY_ID_4:
1357 case IPR_HOST_RCB_OVERLAY_ID_6:
1358 ipr_log_array_error(ioa_cfg, hostrcb);
1359 break;
b0df54bb 1360 case IPR_HOST_RCB_OVERLAY_ID_7:
1361 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
1362 break;
ee0f05b8 1363 case IPR_HOST_RCB_OVERLAY_ID_12:
1364 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
1365 break;
1366 case IPR_HOST_RCB_OVERLAY_ID_13:
1367 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
1368 break;
1369 case IPR_HOST_RCB_OVERLAY_ID_14:
1370 case IPR_HOST_RCB_OVERLAY_ID_16:
1371 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
1372 break;
1373 case IPR_HOST_RCB_OVERLAY_ID_17:
1374 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
1375 break;
cf852037 1376 case IPR_HOST_RCB_OVERLAY_ID_1:
1da177e4 1377 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1da177e4 1378 default:
a9cfca96 1379 ipr_log_generic_error(ioa_cfg, hostrcb);
1da177e4
LT
1380 break;
1381 }
1382}
1383
1384/**
1385 * ipr_process_error - Op done function for an adapter error log.
1386 * @ipr_cmd: ipr command struct
1387 *
1388 * This function is the op done function for an error log host
1389 * controlled async from the adapter. It will log the error and
1390 * send the HCAM back to the adapter.
1391 *
1392 * Return value:
1393 * none
1394 **/
1395static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
1396{
1397 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1398 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1399 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
1400
1401 list_del(&hostrcb->queue);
1402 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1403
1404 if (!ioasc) {
1405 ipr_handle_log_data(ioa_cfg, hostrcb);
1406 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
1407 dev_err(&ioa_cfg->pdev->dev,
1408 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1409 }
1410
1411 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
1412}
1413
1414/**
1415 * ipr_timeout - An internally generated op has timed out.
1416 * @ipr_cmd: ipr command struct
1417 *
1418 * This function blocks host requests and initiates an
1419 * adapter reset.
1420 *
1421 * Return value:
1422 * none
1423 **/
1424static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
1425{
1426 unsigned long lock_flags = 0;
1427 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1428
1429 ENTER;
1430 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1431
1432 ioa_cfg->errors_logged++;
1433 dev_err(&ioa_cfg->pdev->dev,
1434 "Adapter being reset due to command timeout.\n");
1435
1436 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1437 ioa_cfg->sdt_state = GET_DUMP;
1438
1439 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
1440 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1441
1442 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1443 LEAVE;
1444}
1445
1446/**
1447 * ipr_oper_timeout - Adapter timed out transitioning to operational
1448 * @ipr_cmd: ipr command struct
1449 *
1450 * This function blocks host requests and initiates an
1451 * adapter reset.
1452 *
1453 * Return value:
1454 * none
1455 **/
1456static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
1457{
1458 unsigned long lock_flags = 0;
1459 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1460
1461 ENTER;
1462 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1463
1464 ioa_cfg->errors_logged++;
1465 dev_err(&ioa_cfg->pdev->dev,
1466 "Adapter timed out transitioning to operational.\n");
1467
1468 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1469 ioa_cfg->sdt_state = GET_DUMP;
1470
1471 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
1472 if (ipr_fastfail)
1473 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
1474 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1475 }
1476
1477 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1478 LEAVE;
1479}
1480
1481/**
1482 * ipr_reset_reload - Reset/Reload the IOA
1483 * @ioa_cfg: ioa config struct
1484 * @shutdown_type: shutdown type
1485 *
1486 * This function resets the adapter and re-initializes it.
1487 * This function assumes that all new host commands have been stopped.
1488 * Return value:
1489 * SUCCESS / FAILED
1490 **/
1491static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
1492 enum ipr_shutdown_type shutdown_type)
1493{
1494 if (!ioa_cfg->in_reset_reload)
1495 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
1496
1497 spin_unlock_irq(ioa_cfg->host->host_lock);
1498 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
1499 spin_lock_irq(ioa_cfg->host->host_lock);
1500
1501 /* If we got hit with a host reset while we were already resetting
1502 the adapter for some reason, and the reset failed. */
1503 if (ioa_cfg->ioa_is_dead) {
1504 ipr_trace;
1505 return FAILED;
1506 }
1507
1508 return SUCCESS;
1509}
1510
1511/**
1512 * ipr_find_ses_entry - Find matching SES in SES table
1513 * @res: resource entry struct of SES
1514 *
1515 * Return value:
1516 * pointer to SES table entry / NULL on failure
1517 **/
1518static const struct ipr_ses_table_entry *
1519ipr_find_ses_entry(struct ipr_resource_entry *res)
1520{
1521 int i, j, matches;
1522 const struct ipr_ses_table_entry *ste = ipr_ses_table;
1523
1524 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
1525 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
1526 if (ste->compare_product_id_byte[j] == 'X') {
1527 if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
1528 matches++;
1529 else
1530 break;
1531 } else
1532 matches++;
1533 }
1534
1535 if (matches == IPR_PROD_ID_LEN)
1536 return ste;
1537 }
1538
1539 return NULL;
1540}
1541
1542/**
1543 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
1544 * @ioa_cfg: ioa config struct
1545 * @bus: SCSI bus
1546 * @bus_width: bus width
1547 *
1548 * Return value:
1549 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
1550 * For a 2-byte wide SCSI bus, the maximum transfer speed is
1551 * twice the maximum transfer rate (e.g. for a wide enabled bus,
1552 * max 160MHz = max 320MB/sec).
1553 **/
1554static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
1555{
1556 struct ipr_resource_entry *res;
1557 const struct ipr_ses_table_entry *ste;
1558 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
1559
1560 /* Loop through each config table entry in the config table buffer */
1561 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1562 if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
1563 continue;
1564
1565 if (bus != res->cfgte.res_addr.bus)
1566 continue;
1567
1568 if (!(ste = ipr_find_ses_entry(res)))
1569 continue;
1570
1571 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
1572 }
1573
1574 return max_xfer_rate;
1575}
1576
1577/**
1578 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
1579 * @ioa_cfg: ioa config struct
1580 * @max_delay: max delay in micro-seconds to wait
1581 *
1582 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
1583 *
1584 * Return value:
1585 * 0 on success / other on failure
1586 **/
1587static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
1588{
1589 volatile u32 pcii_reg;
1590 int delay = 1;
1591
1592 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
1593 while (delay < max_delay) {
1594 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
1595
1596 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
1597 return 0;
1598
1599 /* udelay cannot be used if delay is more than a few milliseconds */
1600 if ((delay / 1000) > MAX_UDELAY_MS)
1601 mdelay(delay / 1000);
1602 else
1603 udelay(delay);
1604
1605 delay += delay;
1606 }
1607 return -EIO;
1608}
1609
1610/**
1611 * ipr_get_ldump_data_section - Dump IOA memory
1612 * @ioa_cfg: ioa config struct
1613 * @start_addr: adapter address to dump
1614 * @dest: destination kernel buffer
1615 * @length_in_words: length to dump in 4 byte words
1616 *
1617 * Return value:
1618 * 0 on success / -EIO on failure
1619 **/
1620static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1621 u32 start_addr,
1622 __be32 *dest, u32 length_in_words)
1623{
1624 volatile u32 temp_pcii_reg;
1625 int i, delay = 0;
1626
1627 /* Write IOA interrupt reg starting LDUMP state */
1628 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
1629 ioa_cfg->regs.set_uproc_interrupt_reg);
1630
1631 /* Wait for IO debug acknowledge */
1632 if (ipr_wait_iodbg_ack(ioa_cfg,
1633 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
1634 dev_err(&ioa_cfg->pdev->dev,
1635 "IOA dump long data transfer timeout\n");
1636 return -EIO;
1637 }
1638
1639 /* Signal LDUMP interlocked - clear IO debug ack */
1640 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1641 ioa_cfg->regs.clr_interrupt_reg);
1642
1643 /* Write Mailbox with starting address */
1644 writel(start_addr, ioa_cfg->ioa_mailbox);
1645
1646 /* Signal address valid - clear IOA Reset alert */
1647 writel(IPR_UPROCI_RESET_ALERT,
1648 ioa_cfg->regs.clr_uproc_interrupt_reg);
1649
1650 for (i = 0; i < length_in_words; i++) {
1651 /* Wait for IO debug acknowledge */
1652 if (ipr_wait_iodbg_ack(ioa_cfg,
1653 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
1654 dev_err(&ioa_cfg->pdev->dev,
1655 "IOA dump short data transfer timeout\n");
1656 return -EIO;
1657 }
1658
1659 /* Read data from mailbox and increment destination pointer */
1660 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
1661 dest++;
1662
1663 /* For all but the last word of data, signal data received */
1664 if (i < (length_in_words - 1)) {
1665 /* Signal dump data received - Clear IO debug Ack */
1666 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1667 ioa_cfg->regs.clr_interrupt_reg);
1668 }
1669 }
1670
1671 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
1672 writel(IPR_UPROCI_RESET_ALERT,
1673 ioa_cfg->regs.set_uproc_interrupt_reg);
1674
1675 writel(IPR_UPROCI_IO_DEBUG_ALERT,
1676 ioa_cfg->regs.clr_uproc_interrupt_reg);
1677
1678 /* Signal dump data received - Clear IO debug Ack */
1679 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1680 ioa_cfg->regs.clr_interrupt_reg);
1681
1682 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
1683 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
1684 temp_pcii_reg =
1685 readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
1686
1687 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
1688 return 0;
1689
1690 udelay(10);
1691 delay += 10;
1692 }
1693
1694 return 0;
1695}
1696
1697#ifdef CONFIG_SCSI_IPR_DUMP
1698/**
1699 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
1700 * @ioa_cfg: ioa config struct
1701 * @pci_address: adapter address
1702 * @length: length of data to copy
1703 *
1704 * Copy data from PCI adapter to kernel buffer.
1705 * Note: length MUST be a 4 byte multiple
1706 * Return value:
1707 * 0 on success / other on failure
1708 **/
1709static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
1710 unsigned long pci_address, u32 length)
1711{
1712 int bytes_copied = 0;
1713 int cur_len, rc, rem_len, rem_page_len;
1714 __be32 *page;
1715 unsigned long lock_flags = 0;
1716 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
1717
1718 while (bytes_copied < length &&
1719 (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
1720 if (ioa_dump->page_offset >= PAGE_SIZE ||
1721 ioa_dump->page_offset == 0) {
1722 page = (__be32 *)__get_free_page(GFP_ATOMIC);
1723
1724 if (!page) {
1725 ipr_trace;
1726 return bytes_copied;
1727 }
1728
1729 ioa_dump->page_offset = 0;
1730 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
1731 ioa_dump->next_page_index++;
1732 } else
1733 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
1734
1735 rem_len = length - bytes_copied;
1736 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
1737 cur_len = min(rem_len, rem_page_len);
1738
1739 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1740 if (ioa_cfg->sdt_state == ABORT_DUMP) {
1741 rc = -EIO;
1742 } else {
1743 rc = ipr_get_ldump_data_section(ioa_cfg,
1744 pci_address + bytes_copied,
1745 &page[ioa_dump->page_offset / 4],
1746 (cur_len / sizeof(u32)));
1747 }
1748 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1749
1750 if (!rc) {
1751 ioa_dump->page_offset += cur_len;
1752 bytes_copied += cur_len;
1753 } else {
1754 ipr_trace;
1755 break;
1756 }
1757 schedule();
1758 }
1759
1760 return bytes_copied;
1761}
1762
1763/**
1764 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
1765 * @hdr: dump entry header struct
1766 *
1767 * Return value:
1768 * nothing
1769 **/
1770static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
1771{
1772 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
1773 hdr->num_elems = 1;
1774 hdr->offset = sizeof(*hdr);
1775 hdr->status = IPR_DUMP_STATUS_SUCCESS;
1776}
1777
1778/**
1779 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
1780 * @ioa_cfg: ioa config struct
1781 * @driver_dump: driver dump struct
1782 *
1783 * Return value:
1784 * nothing
1785 **/
1786static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
1787 struct ipr_driver_dump *driver_dump)
1788{
1789 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
1790
1791 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
1792 driver_dump->ioa_type_entry.hdr.len =
1793 sizeof(struct ipr_dump_ioa_type_entry) -
1794 sizeof(struct ipr_dump_entry_header);
1795 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1796 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
1797 driver_dump->ioa_type_entry.type = ioa_cfg->type;
1798 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
1799 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
1800 ucode_vpd->minor_release[1];
1801 driver_dump->hdr.num_entries++;
1802}
1803
1804/**
1805 * ipr_dump_version_data - Fill in the driver version in the dump.
1806 * @ioa_cfg: ioa config struct
1807 * @driver_dump: driver dump struct
1808 *
1809 * Return value:
1810 * nothing
1811 **/
1812static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
1813 struct ipr_driver_dump *driver_dump)
1814{
1815 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
1816 driver_dump->version_entry.hdr.len =
1817 sizeof(struct ipr_dump_version_entry) -
1818 sizeof(struct ipr_dump_entry_header);
1819 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1820 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
1821 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
1822 driver_dump->hdr.num_entries++;
1823}
1824
1825/**
1826 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
1827 * @ioa_cfg: ioa config struct
1828 * @driver_dump: driver dump struct
1829 *
1830 * Return value:
1831 * nothing
1832 **/
1833static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
1834 struct ipr_driver_dump *driver_dump)
1835{
1836 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
1837 driver_dump->trace_entry.hdr.len =
1838 sizeof(struct ipr_dump_trace_entry) -
1839 sizeof(struct ipr_dump_entry_header);
1840 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1841 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
1842 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
1843 driver_dump->hdr.num_entries++;
1844}
1845
1846/**
1847 * ipr_dump_location_data - Fill in the IOA location in the dump.
1848 * @ioa_cfg: ioa config struct
1849 * @driver_dump: driver dump struct
1850 *
1851 * Return value:
1852 * nothing
1853 **/
1854static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
1855 struct ipr_driver_dump *driver_dump)
1856{
1857 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
1858 driver_dump->location_entry.hdr.len =
1859 sizeof(struct ipr_dump_location_entry) -
1860 sizeof(struct ipr_dump_entry_header);
1861 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1862 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
1863 strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
1864 driver_dump->hdr.num_entries++;
1865}
1866
1867/**
1868 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
1869 * @ioa_cfg: ioa config struct
1870 * @dump: dump struct
1871 *
1872 * Return value:
1873 * nothing
1874 **/
1875static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
1876{
1877 unsigned long start_addr, sdt_word;
1878 unsigned long lock_flags = 0;
1879 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
1880 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
1881 u32 num_entries, start_off, end_off;
1882 u32 bytes_to_copy, bytes_copied, rc;
1883 struct ipr_sdt *sdt;
1884 int i;
1885
1886 ENTER;
1887
1888 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1889
1890 if (ioa_cfg->sdt_state != GET_DUMP) {
1891 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1892 return;
1893 }
1894
1895 start_addr = readl(ioa_cfg->ioa_mailbox);
1896
1897 if (!ipr_sdt_is_fmt2(start_addr)) {
1898 dev_err(&ioa_cfg->pdev->dev,
1899 "Invalid dump table format: %lx\n", start_addr);
1900 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1901 return;
1902 }
1903
1904 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
1905
1906 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
1907
1908 /* Initialize the overall dump header */
1909 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
1910 driver_dump->hdr.num_entries = 1;
1911 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
1912 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
1913 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
1914 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
1915
1916 ipr_dump_version_data(ioa_cfg, driver_dump);
1917 ipr_dump_location_data(ioa_cfg, driver_dump);
1918 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
1919 ipr_dump_trace_data(ioa_cfg, driver_dump);
1920
1921 /* Update dump_header */
1922 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
1923
1924 /* IOA Dump entry */
1925 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1926 ioa_dump->format = IPR_SDT_FMT2;
1927 ioa_dump->hdr.len = 0;
1928 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1929 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
1930
1931 /* First entries in sdt are actually a list of dump addresses and
1932 lengths to gather the real dump data. sdt represents the pointer
1933 to the ioa generated dump table. Dump data will be extracted based
1934 on entries in this table */
1935 sdt = &ioa_dump->sdt;
1936
1937 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
1938 sizeof(struct ipr_sdt) / sizeof(__be32));
1939
1940 /* Smart Dump table is ready to use and the first entry is valid */
1941 if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
1942 dev_err(&ioa_cfg->pdev->dev,
1943 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
1944 rc, be32_to_cpu(sdt->hdr.state));
1945 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
1946 ioa_cfg->sdt_state = DUMP_OBTAINED;
1947 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1948 return;
1949 }
1950
1951 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
1952
1953 if (num_entries > IPR_NUM_SDT_ENTRIES)
1954 num_entries = IPR_NUM_SDT_ENTRIES;
1955
1956 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1957
1958 for (i = 0; i < num_entries; i++) {
1959 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
1960 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1961 break;
1962 }
1963
1964 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
1965 sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
1966 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
1967 end_off = be32_to_cpu(sdt->entry[i].end_offset);
1968
1969 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
1970 bytes_to_copy = end_off - start_off;
1971 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
1972 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
1973 continue;
1974 }
1975
1976 /* Copy data from adapter to driver buffers */
1977 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
1978 bytes_to_copy);
1979
1980 ioa_dump->hdr.len += bytes_copied;
1981
1982 if (bytes_copied != bytes_to_copy) {
1983 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1984 break;
1985 }
1986 }
1987 }
1988 }
1989
1990 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
1991
1992 /* Update dump_header */
1993 driver_dump->hdr.len += ioa_dump->hdr.len;
1994 wmb();
1995 ioa_cfg->sdt_state = DUMP_OBTAINED;
1996 LEAVE;
1997}
1998
1999#else
2000#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
2001#endif
2002
2003/**
2004 * ipr_release_dump - Free adapter dump memory
2005 * @kref: kref struct
2006 *
2007 * Return value:
2008 * nothing
2009 **/
2010static void ipr_release_dump(struct kref *kref)
2011{
2012 struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
2013 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
2014 unsigned long lock_flags = 0;
2015 int i;
2016
2017 ENTER;
2018 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2019 ioa_cfg->dump = NULL;
2020 ioa_cfg->sdt_state = INACTIVE;
2021 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2022
2023 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
2024 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
2025
2026 kfree(dump);
2027 LEAVE;
2028}
2029
2030/**
2031 * ipr_worker_thread - Worker thread
2032 * @data: ioa config struct
2033 *
2034 * Called at task level from a work thread. This function takes care
2035 * of adding and removing device from the mid-layer as configuration
2036 * changes are detected by the adapter.
2037 *
2038 * Return value:
2039 * nothing
2040 **/
2041static void ipr_worker_thread(void *data)
2042{
2043 unsigned long lock_flags;
2044 struct ipr_resource_entry *res;
2045 struct scsi_device *sdev;
2046 struct ipr_dump *dump;
2047 struct ipr_ioa_cfg *ioa_cfg = data;
2048 u8 bus, target, lun;
2049 int did_work;
2050
2051 ENTER;
2052 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2053
2054 if (ioa_cfg->sdt_state == GET_DUMP) {
2055 dump = ioa_cfg->dump;
2056 if (!dump) {
2057 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2058 return;
2059 }
2060 kref_get(&dump->kref);
2061 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2062 ipr_get_ioa_dump(ioa_cfg, dump);
2063 kref_put(&dump->kref, ipr_release_dump);
2064
2065 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2066 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
2067 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2068 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2069 return;
2070 }
2071
2072restart:
2073 do {
2074 did_work = 0;
2075 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
2076 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2077 return;
2078 }
2079
2080 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2081 if (res->del_from_ml && res->sdev) {
2082 did_work = 1;
2083 sdev = res->sdev;
2084 if (!scsi_device_get(sdev)) {
1da177e4
LT
2085 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
2086 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2087 scsi_remove_device(sdev);
2088 scsi_device_put(sdev);
2089 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2090 }
2091 break;
2092 }
2093 }
2094 } while(did_work);
2095
2096 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2097 if (res->add_to_ml) {
2098 bus = res->cfgte.res_addr.bus;
2099 target = res->cfgte.res_addr.target;
2100 lun = res->cfgte.res_addr.lun;
1121b794 2101 res->add_to_ml = 0;
1da177e4
LT
2102 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2103 scsi_add_device(ioa_cfg->host, bus, target, lun);
2104 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2105 goto restart;
2106 }
2107 }
2108
2109 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
312c004d 2110 kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE);
1da177e4
LT
2111 LEAVE;
2112}
2113
2114#ifdef CONFIG_SCSI_IPR_TRACE
2115/**
2116 * ipr_read_trace - Dump the adapter trace
2117 * @kobj: kobject struct
2118 * @buf: buffer
2119 * @off: offset
2120 * @count: buffer size
2121 *
2122 * Return value:
2123 * number of bytes printed to buffer
2124 **/
2125static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
2126 loff_t off, size_t count)
2127{
2128 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2129 struct Scsi_Host *shost = class_to_shost(cdev);
2130 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2131 unsigned long lock_flags = 0;
2132 int size = IPR_TRACE_SIZE;
2133 char *src = (char *)ioa_cfg->trace;
2134
2135 if (off > size)
2136 return 0;
2137 if (off + count > size) {
2138 size -= off;
2139 count = size;
2140 }
2141
2142 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2143 memcpy(buf, &src[off], count);
2144 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2145 return count;
2146}
2147
2148static struct bin_attribute ipr_trace_attr = {
2149 .attr = {
2150 .name = "trace",
2151 .mode = S_IRUGO,
2152 },
2153 .size = 0,
2154 .read = ipr_read_trace,
2155};
2156#endif
2157
62275040 2158static const struct {
2159 enum ipr_cache_state state;
2160 char *name;
2161} cache_state [] = {
2162 { CACHE_NONE, "none" },
2163 { CACHE_DISABLED, "disabled" },
2164 { CACHE_ENABLED, "enabled" }
2165};
2166
2167/**
2168 * ipr_show_write_caching - Show the write caching attribute
2169 * @class_dev: class device struct
2170 * @buf: buffer
2171 *
2172 * Return value:
2173 * number of bytes printed to buffer
2174 **/
2175static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
2176{
2177 struct Scsi_Host *shost = class_to_shost(class_dev);
2178 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2179 unsigned long lock_flags = 0;
2180 int i, len = 0;
2181
2182 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2183 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2184 if (cache_state[i].state == ioa_cfg->cache_state) {
2185 len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
2186 break;
2187 }
2188 }
2189 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2190 return len;
2191}
2192
2193
2194/**
2195 * ipr_store_write_caching - Enable/disable adapter write cache
2196 * @class_dev: class_device struct
2197 * @buf: buffer
2198 * @count: buffer size
2199 *
2200 * This function will enable/disable adapter write cache.
2201 *
2202 * Return value:
2203 * count on success / other on failure
2204 **/
2205static ssize_t ipr_store_write_caching(struct class_device *class_dev,
2206 const char *buf, size_t count)
2207{
2208 struct Scsi_Host *shost = class_to_shost(class_dev);
2209 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2210 unsigned long lock_flags = 0;
2211 enum ipr_cache_state new_state = CACHE_INVALID;
2212 int i;
2213
2214 if (!capable(CAP_SYS_ADMIN))
2215 return -EACCES;
2216 if (ioa_cfg->cache_state == CACHE_NONE)
2217 return -EINVAL;
2218
2219 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2220 if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2221 new_state = cache_state[i].state;
2222 break;
2223 }
2224 }
2225
2226 if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2227 return -EINVAL;
2228
2229 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2230 if (ioa_cfg->cache_state == new_state) {
2231 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2232 return count;
2233 }
2234
2235 ioa_cfg->cache_state = new_state;
2236 dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2237 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2238 if (!ioa_cfg->in_reset_reload)
2239 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2240 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2241 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2242
2243 return count;
2244}
2245
2246static struct class_device_attribute ipr_ioa_cache_attr = {
2247 .attr = {
2248 .name = "write_cache",
2249 .mode = S_IRUGO | S_IWUSR,
2250 },
2251 .show = ipr_show_write_caching,
2252 .store = ipr_store_write_caching
2253};
2254
1da177e4
LT
2255/**
2256 * ipr_show_fw_version - Show the firmware version
2257 * @class_dev: class device struct
2258 * @buf: buffer
2259 *
2260 * Return value:
2261 * number of bytes printed to buffer
2262 **/
2263static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
2264{
2265 struct Scsi_Host *shost = class_to_shost(class_dev);
2266 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2267 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2268 unsigned long lock_flags = 0;
2269 int len;
2270
2271 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2272 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2273 ucode_vpd->major_release, ucode_vpd->card_type,
2274 ucode_vpd->minor_release[0],
2275 ucode_vpd->minor_release[1]);
2276 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2277 return len;
2278}
2279
2280static struct class_device_attribute ipr_fw_version_attr = {
2281 .attr = {
2282 .name = "fw_version",
2283 .mode = S_IRUGO,
2284 },
2285 .show = ipr_show_fw_version,
2286};
2287
2288/**
2289 * ipr_show_log_level - Show the adapter's error logging level
2290 * @class_dev: class device struct
2291 * @buf: buffer
2292 *
2293 * Return value:
2294 * number of bytes printed to buffer
2295 **/
2296static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
2297{
2298 struct Scsi_Host *shost = class_to_shost(class_dev);
2299 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2300 unsigned long lock_flags = 0;
2301 int len;
2302
2303 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2304 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2305 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2306 return len;
2307}
2308
2309/**
2310 * ipr_store_log_level - Change the adapter's error logging level
2311 * @class_dev: class device struct
2312 * @buf: buffer
2313 *
2314 * Return value:
2315 * number of bytes printed to buffer
2316 **/
2317static ssize_t ipr_store_log_level(struct class_device *class_dev,
2318 const char *buf, size_t count)
2319{
2320 struct Scsi_Host *shost = class_to_shost(class_dev);
2321 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2322 unsigned long lock_flags = 0;
2323
2324 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2325 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2326 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2327 return strlen(buf);
2328}
2329
2330static struct class_device_attribute ipr_log_level_attr = {
2331 .attr = {
2332 .name = "log_level",
2333 .mode = S_IRUGO | S_IWUSR,
2334 },
2335 .show = ipr_show_log_level,
2336 .store = ipr_store_log_level
2337};
2338
2339/**
2340 * ipr_store_diagnostics - IOA Diagnostics interface
2341 * @class_dev: class_device struct
2342 * @buf: buffer
2343 * @count: buffer size
2344 *
2345 * This function will reset the adapter and wait a reasonable
2346 * amount of time for any errors that the adapter might log.
2347 *
2348 * Return value:
2349 * count on success / other on failure
2350 **/
2351static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2352 const char *buf, size_t count)
2353{
2354 struct Scsi_Host *shost = class_to_shost(class_dev);
2355 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2356 unsigned long lock_flags = 0;
2357 int rc = count;
2358
2359 if (!capable(CAP_SYS_ADMIN))
2360 return -EACCES;
2361
2362 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2363 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2364 ioa_cfg->errors_logged = 0;
2365 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2366
2367 if (ioa_cfg->in_reset_reload) {
2368 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2369 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2370
2371 /* Wait for a second for any errors to be logged */
2372 msleep(1000);
2373 } else {
2374 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2375 return -EIO;
2376 }
2377
2378 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2379 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2380 rc = -EIO;
2381 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2382
2383 return rc;
2384}
2385
2386static struct class_device_attribute ipr_diagnostics_attr = {
2387 .attr = {
2388 .name = "run_diagnostics",
2389 .mode = S_IWUSR,
2390 },
2391 .store = ipr_store_diagnostics
2392};
2393
f37eb54b 2394/**
2395 * ipr_show_adapter_state - Show the adapter's state
2396 * @class_dev: class device struct
2397 * @buf: buffer
2398 *
2399 * Return value:
2400 * number of bytes printed to buffer
2401 **/
2402static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf)
2403{
2404 struct Scsi_Host *shost = class_to_shost(class_dev);
2405 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2406 unsigned long lock_flags = 0;
2407 int len;
2408
2409 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2410 if (ioa_cfg->ioa_is_dead)
2411 len = snprintf(buf, PAGE_SIZE, "offline\n");
2412 else
2413 len = snprintf(buf, PAGE_SIZE, "online\n");
2414 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2415 return len;
2416}
2417
2418/**
2419 * ipr_store_adapter_state - Change adapter state
2420 * @class_dev: class_device struct
2421 * @buf: buffer
2422 * @count: buffer size
2423 *
2424 * This function will change the adapter's state.
2425 *
2426 * Return value:
2427 * count on success / other on failure
2428 **/
2429static ssize_t ipr_store_adapter_state(struct class_device *class_dev,
2430 const char *buf, size_t count)
2431{
2432 struct Scsi_Host *shost = class_to_shost(class_dev);
2433 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2434 unsigned long lock_flags;
2435 int result = count;
2436
2437 if (!capable(CAP_SYS_ADMIN))
2438 return -EACCES;
2439
2440 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2441 if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
2442 ioa_cfg->ioa_is_dead = 0;
2443 ioa_cfg->reset_retries = 0;
2444 ioa_cfg->in_ioa_bringdown = 0;
2445 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2446 }
2447 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2448 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2449
2450 return result;
2451}
2452
2453static struct class_device_attribute ipr_ioa_state_attr = {
2454 .attr = {
2455 .name = "state",
2456 .mode = S_IRUGO | S_IWUSR,
2457 },
2458 .show = ipr_show_adapter_state,
2459 .store = ipr_store_adapter_state
2460};
2461
1da177e4
LT
2462/**
2463 * ipr_store_reset_adapter - Reset the adapter
2464 * @class_dev: class_device struct
2465 * @buf: buffer
2466 * @count: buffer size
2467 *
2468 * This function will reset the adapter.
2469 *
2470 * Return value:
2471 * count on success / other on failure
2472 **/
2473static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2474 const char *buf, size_t count)
2475{
2476 struct Scsi_Host *shost = class_to_shost(class_dev);
2477 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2478 unsigned long lock_flags;
2479 int result = count;
2480
2481 if (!capable(CAP_SYS_ADMIN))
2482 return -EACCES;
2483
2484 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2485 if (!ioa_cfg->in_reset_reload)
2486 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2487 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2488 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2489
2490 return result;
2491}
2492
2493static struct class_device_attribute ipr_ioa_reset_attr = {
2494 .attr = {
2495 .name = "reset_host",
2496 .mode = S_IWUSR,
2497 },
2498 .store = ipr_store_reset_adapter
2499};
2500
2501/**
2502 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2503 * @buf_len: buffer length
2504 *
2505 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2506 * list to use for microcode download
2507 *
2508 * Return value:
2509 * pointer to sglist / NULL on failure
2510 **/
2511static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2512{
2513 int sg_size, order, bsize_elem, num_elem, i, j;
2514 struct ipr_sglist *sglist;
2515 struct scatterlist *scatterlist;
2516 struct page *page;
2517
2518 /* Get the minimum size per scatter/gather element */
2519 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2520
2521 /* Get the actual size per element */
2522 order = get_order(sg_size);
2523
2524 /* Determine the actual number of bytes per element */
2525 bsize_elem = PAGE_SIZE * (1 << order);
2526
2527 /* Determine the actual number of sg entries needed */
2528 if (buf_len % bsize_elem)
2529 num_elem = (buf_len / bsize_elem) + 1;
2530 else
2531 num_elem = buf_len / bsize_elem;
2532
2533 /* Allocate a scatter/gather list for the DMA */
0bc42e35 2534 sglist = kzalloc(sizeof(struct ipr_sglist) +
1da177e4
LT
2535 (sizeof(struct scatterlist) * (num_elem - 1)),
2536 GFP_KERNEL);
2537
2538 if (sglist == NULL) {
2539 ipr_trace;
2540 return NULL;
2541 }
2542
1da177e4
LT
2543 scatterlist = sglist->scatterlist;
2544
2545 sglist->order = order;
2546 sglist->num_sg = num_elem;
2547
2548 /* Allocate a bunch of sg elements */
2549 for (i = 0; i < num_elem; i++) {
2550 page = alloc_pages(GFP_KERNEL, order);
2551 if (!page) {
2552 ipr_trace;
2553
2554 /* Free up what we already allocated */
2555 for (j = i - 1; j >= 0; j--)
2556 __free_pages(scatterlist[j].page, order);
2557 kfree(sglist);
2558 return NULL;
2559 }
2560
2561 scatterlist[i].page = page;
2562 }
2563
2564 return sglist;
2565}
2566
2567/**
2568 * ipr_free_ucode_buffer - Frees a microcode download buffer
2569 * @p_dnld: scatter/gather list pointer
2570 *
2571 * Free a DMA'able ucode download buffer previously allocated with
2572 * ipr_alloc_ucode_buffer
2573 *
2574 * Return value:
2575 * nothing
2576 **/
2577static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2578{
2579 int i;
2580
2581 for (i = 0; i < sglist->num_sg; i++)
2582 __free_pages(sglist->scatterlist[i].page, sglist->order);
2583
2584 kfree(sglist);
2585}
2586
2587/**
2588 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2589 * @sglist: scatter/gather list pointer
2590 * @buffer: buffer pointer
2591 * @len: buffer length
2592 *
2593 * Copy a microcode image from a user buffer into a buffer allocated by
2594 * ipr_alloc_ucode_buffer
2595 *
2596 * Return value:
2597 * 0 on success / other on failure
2598 **/
2599static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2600 u8 *buffer, u32 len)
2601{
2602 int bsize_elem, i, result = 0;
2603 struct scatterlist *scatterlist;
2604 void *kaddr;
2605
2606 /* Determine the actual number of bytes per element */
2607 bsize_elem = PAGE_SIZE * (1 << sglist->order);
2608
2609 scatterlist = sglist->scatterlist;
2610
2611 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2612 kaddr = kmap(scatterlist[i].page);
2613 memcpy(kaddr, buffer, bsize_elem);
2614 kunmap(scatterlist[i].page);
2615
2616 scatterlist[i].length = bsize_elem;
2617
2618 if (result != 0) {
2619 ipr_trace;
2620 return result;
2621 }
2622 }
2623
2624 if (len % bsize_elem) {
2625 kaddr = kmap(scatterlist[i].page);
2626 memcpy(kaddr, buffer, len % bsize_elem);
2627 kunmap(scatterlist[i].page);
2628
2629 scatterlist[i].length = len % bsize_elem;
2630 }
2631
2632 sglist->buffer_len = len;
2633 return result;
2634}
2635
2636/**
12baa420 2637 * ipr_build_ucode_ioadl - Build a microcode download IOADL
1da177e4
LT
2638 * @ipr_cmd: ipr command struct
2639 * @sglist: scatter/gather list
1da177e4 2640 *
12baa420 2641 * Builds a microcode download IOA data list (IOADL).
1da177e4 2642 *
1da177e4 2643 **/
12baa420 2644static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
2645 struct ipr_sglist *sglist)
1da177e4 2646{
1da177e4
LT
2647 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2648 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2649 struct scatterlist *scatterlist = sglist->scatterlist;
2650 int i;
2651
12baa420 2652 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
1da177e4 2653 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
12baa420 2654 ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
1da177e4
LT
2655 ioarcb->write_ioadl_len =
2656 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2657
2658 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2659 ioadl[i].flags_and_data_len =
2660 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2661 ioadl[i].address =
2662 cpu_to_be32(sg_dma_address(&scatterlist[i]));
2663 }
2664
12baa420 2665 ioadl[i-1].flags_and_data_len |=
2666 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
2667}
2668
2669/**
2670 * ipr_update_ioa_ucode - Update IOA's microcode
2671 * @ioa_cfg: ioa config struct
2672 * @sglist: scatter/gather list
2673 *
2674 * Initiate an adapter reset to update the IOA's microcode
2675 *
2676 * Return value:
2677 * 0 on success / -EIO on failure
2678 **/
2679static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
2680 struct ipr_sglist *sglist)
2681{
2682 unsigned long lock_flags;
2683
2684 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2685
2686 if (ioa_cfg->ucode_sglist) {
2687 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2688 dev_err(&ioa_cfg->pdev->dev,
2689 "Microcode download already in progress\n");
2690 return -EIO;
1da177e4 2691 }
12baa420 2692
2693 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
2694 sglist->num_sg, DMA_TO_DEVICE);
2695
2696 if (!sglist->num_dma_sg) {
2697 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2698 dev_err(&ioa_cfg->pdev->dev,
2699 "Failed to map microcode download buffer!\n");
1da177e4
LT
2700 return -EIO;
2701 }
2702
12baa420 2703 ioa_cfg->ucode_sglist = sglist;
2704 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2705 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2706 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2707
2708 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2709 ioa_cfg->ucode_sglist = NULL;
2710 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1da177e4
LT
2711 return 0;
2712}
2713
2714/**
2715 * ipr_store_update_fw - Update the firmware on the adapter
2716 * @class_dev: class_device struct
2717 * @buf: buffer
2718 * @count: buffer size
2719 *
2720 * This function will update the firmware on the adapter.
2721 *
2722 * Return value:
2723 * count on success / other on failure
2724 **/
2725static ssize_t ipr_store_update_fw(struct class_device *class_dev,
2726 const char *buf, size_t count)
2727{
2728 struct Scsi_Host *shost = class_to_shost(class_dev);
2729 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2730 struct ipr_ucode_image_header *image_hdr;
2731 const struct firmware *fw_entry;
2732 struct ipr_sglist *sglist;
1da177e4
LT
2733 char fname[100];
2734 char *src;
2735 int len, result, dnld_size;
2736
2737 if (!capable(CAP_SYS_ADMIN))
2738 return -EACCES;
2739
2740 len = snprintf(fname, 99, "%s", buf);
2741 fname[len-1] = '\0';
2742
2743 if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
2744 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
2745 return -EIO;
2746 }
2747
2748 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
2749
2750 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
2751 (ioa_cfg->vpd_cbs->page3_data.card_type &&
2752 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
2753 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
2754 release_firmware(fw_entry);
2755 return -EINVAL;
2756 }
2757
2758 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
2759 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
2760 sglist = ipr_alloc_ucode_buffer(dnld_size);
2761
2762 if (!sglist) {
2763 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
2764 release_firmware(fw_entry);
2765 return -ENOMEM;
2766 }
2767
2768 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
2769
2770 if (result) {
2771 dev_err(&ioa_cfg->pdev->dev,
2772 "Microcode buffer copy to DMA buffer failed\n");
12baa420 2773 goto out;
1da177e4
LT
2774 }
2775
12baa420 2776 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
1da177e4 2777
12baa420 2778 if (!result)
2779 result = count;
2780out:
1da177e4
LT
2781 ipr_free_ucode_buffer(sglist);
2782 release_firmware(fw_entry);
12baa420 2783 return result;
1da177e4
LT
2784}
2785
2786static struct class_device_attribute ipr_update_fw_attr = {
2787 .attr = {
2788 .name = "update_fw",
2789 .mode = S_IWUSR,
2790 },
2791 .store = ipr_store_update_fw
2792};
2793
2794static struct class_device_attribute *ipr_ioa_attrs[] = {
2795 &ipr_fw_version_attr,
2796 &ipr_log_level_attr,
2797 &ipr_diagnostics_attr,
f37eb54b 2798 &ipr_ioa_state_attr,
1da177e4
LT
2799 &ipr_ioa_reset_attr,
2800 &ipr_update_fw_attr,
62275040 2801 &ipr_ioa_cache_attr,
1da177e4
LT
2802 NULL,
2803};
2804
2805#ifdef CONFIG_SCSI_IPR_DUMP
2806/**
2807 * ipr_read_dump - Dump the adapter
2808 * @kobj: kobject struct
2809 * @buf: buffer
2810 * @off: offset
2811 * @count: buffer size
2812 *
2813 * Return value:
2814 * number of bytes printed to buffer
2815 **/
2816static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
2817 loff_t off, size_t count)
2818{
2819 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2820 struct Scsi_Host *shost = class_to_shost(cdev);
2821 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2822 struct ipr_dump *dump;
2823 unsigned long lock_flags = 0;
2824 char *src;
2825 int len;
2826 size_t rc = count;
2827
2828 if (!capable(CAP_SYS_ADMIN))
2829 return -EACCES;
2830
2831 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2832 dump = ioa_cfg->dump;
2833
2834 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
2835 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2836 return 0;
2837 }
2838 kref_get(&dump->kref);
2839 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2840
2841 if (off > dump->driver_dump.hdr.len) {
2842 kref_put(&dump->kref, ipr_release_dump);
2843 return 0;
2844 }
2845
2846 if (off + count > dump->driver_dump.hdr.len) {
2847 count = dump->driver_dump.hdr.len - off;
2848 rc = count;
2849 }
2850
2851 if (count && off < sizeof(dump->driver_dump)) {
2852 if (off + count > sizeof(dump->driver_dump))
2853 len = sizeof(dump->driver_dump) - off;
2854 else
2855 len = count;
2856 src = (u8 *)&dump->driver_dump + off;
2857 memcpy(buf, src, len);
2858 buf += len;
2859 off += len;
2860 count -= len;
2861 }
2862
2863 off -= sizeof(dump->driver_dump);
2864
2865 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
2866 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
2867 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
2868 else
2869 len = count;
2870 src = (u8 *)&dump->ioa_dump + off;
2871 memcpy(buf, src, len);
2872 buf += len;
2873 off += len;
2874 count -= len;
2875 }
2876
2877 off -= offsetof(struct ipr_ioa_dump, ioa_data);
2878
2879 while (count) {
2880 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
2881 len = PAGE_ALIGN(off) - off;
2882 else
2883 len = count;
2884 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
2885 src += off & ~PAGE_MASK;
2886 memcpy(buf, src, len);
2887 buf += len;
2888 off += len;
2889 count -= len;
2890 }
2891
2892 kref_put(&dump->kref, ipr_release_dump);
2893 return rc;
2894}
2895
2896/**
2897 * ipr_alloc_dump - Prepare for adapter dump
2898 * @ioa_cfg: ioa config struct
2899 *
2900 * Return value:
2901 * 0 on success / other on failure
2902 **/
2903static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
2904{
2905 struct ipr_dump *dump;
2906 unsigned long lock_flags = 0;
2907
2908 ENTER;
0bc42e35 2909 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
1da177e4
LT
2910
2911 if (!dump) {
2912 ipr_err("Dump memory allocation failed\n");
2913 return -ENOMEM;
2914 }
2915
1da177e4
LT
2916 kref_init(&dump->kref);
2917 dump->ioa_cfg = ioa_cfg;
2918
2919 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2920
2921 if (INACTIVE != ioa_cfg->sdt_state) {
2922 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2923 kfree(dump);
2924 return 0;
2925 }
2926
2927 ioa_cfg->dump = dump;
2928 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
2929 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
2930 ioa_cfg->dump_taken = 1;
2931 schedule_work(&ioa_cfg->work_q);
2932 }
2933 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2934
2935 LEAVE;
2936 return 0;
2937}
2938
2939/**
2940 * ipr_free_dump - Free adapter dump memory
2941 * @ioa_cfg: ioa config struct
2942 *
2943 * Return value:
2944 * 0 on success / other on failure
2945 **/
2946static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
2947{
2948 struct ipr_dump *dump;
2949 unsigned long lock_flags = 0;
2950
2951 ENTER;
2952
2953 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2954 dump = ioa_cfg->dump;
2955 if (!dump) {
2956 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2957 return 0;
2958 }
2959
2960 ioa_cfg->dump = NULL;
2961 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2962
2963 kref_put(&dump->kref, ipr_release_dump);
2964
2965 LEAVE;
2966 return 0;
2967}
2968
2969/**
2970 * ipr_write_dump - Setup dump state of adapter
2971 * @kobj: kobject struct
2972 * @buf: buffer
2973 * @off: offset
2974 * @count: buffer size
2975 *
2976 * Return value:
2977 * number of bytes printed to buffer
2978 **/
2979static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
2980 loff_t off, size_t count)
2981{
2982 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2983 struct Scsi_Host *shost = class_to_shost(cdev);
2984 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2985 int rc;
2986
2987 if (!capable(CAP_SYS_ADMIN))
2988 return -EACCES;
2989
2990 if (buf[0] == '1')
2991 rc = ipr_alloc_dump(ioa_cfg);
2992 else if (buf[0] == '0')
2993 rc = ipr_free_dump(ioa_cfg);
2994 else
2995 return -EINVAL;
2996
2997 if (rc)
2998 return rc;
2999 else
3000 return count;
3001}
3002
3003static struct bin_attribute ipr_dump_attr = {
3004 .attr = {
3005 .name = "dump",
3006 .mode = S_IRUSR | S_IWUSR,
3007 },
3008 .size = 0,
3009 .read = ipr_read_dump,
3010 .write = ipr_write_dump
3011};
3012#else
3013static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
3014#endif
3015
3016/**
3017 * ipr_change_queue_depth - Change the device's queue depth
3018 * @sdev: scsi device struct
3019 * @qdepth: depth to set
3020 *
3021 * Return value:
3022 * actual depth set
3023 **/
3024static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
3025{
3026 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
3027 return sdev->queue_depth;
3028}
3029
3030/**
3031 * ipr_change_queue_type - Change the device's queue type
3032 * @dsev: scsi device struct
3033 * @tag_type: type of tags to use
3034 *
3035 * Return value:
3036 * actual queue type set
3037 **/
3038static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
3039{
3040 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3041 struct ipr_resource_entry *res;
3042 unsigned long lock_flags = 0;
3043
3044 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3045 res = (struct ipr_resource_entry *)sdev->hostdata;
3046
3047 if (res) {
3048 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
3049 /*
3050 * We don't bother quiescing the device here since the
3051 * adapter firmware does it for us.
3052 */
3053 scsi_set_tag_type(sdev, tag_type);
3054
3055 if (tag_type)
3056 scsi_activate_tcq(sdev, sdev->queue_depth);
3057 else
3058 scsi_deactivate_tcq(sdev, sdev->queue_depth);
3059 } else
3060 tag_type = 0;
3061 } else
3062 tag_type = 0;
3063
3064 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3065 return tag_type;
3066}
3067
3068/**
3069 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
3070 * @dev: device struct
3071 * @buf: buffer
3072 *
3073 * Return value:
3074 * number of bytes printed to buffer
3075 **/
10523b3b 3076static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
1da177e4
LT
3077{
3078 struct scsi_device *sdev = to_scsi_device(dev);
3079 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3080 struct ipr_resource_entry *res;
3081 unsigned long lock_flags = 0;
3082 ssize_t len = -ENXIO;
3083
3084 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3085 res = (struct ipr_resource_entry *)sdev->hostdata;
3086 if (res)
3087 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
3088 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3089 return len;
3090}
3091
3092static struct device_attribute ipr_adapter_handle_attr = {
3093 .attr = {
3094 .name = "adapter_handle",
3095 .mode = S_IRUSR,
3096 },
3097 .show = ipr_show_adapter_handle
3098};
3099
3100static struct device_attribute *ipr_dev_attrs[] = {
3101 &ipr_adapter_handle_attr,
3102 NULL,
3103};
3104
3105/**
3106 * ipr_biosparam - Return the HSC mapping
3107 * @sdev: scsi device struct
3108 * @block_device: block device pointer
3109 * @capacity: capacity of the device
3110 * @parm: Array containing returned HSC values.
3111 *
3112 * This function generates the HSC parms that fdisk uses.
3113 * We want to make sure we return something that places partitions
3114 * on 4k boundaries for best performance with the IOA.
3115 *
3116 * Return value:
3117 * 0 on success
3118 **/
3119static int ipr_biosparam(struct scsi_device *sdev,
3120 struct block_device *block_device,
3121 sector_t capacity, int *parm)
3122{
3123 int heads, sectors;
3124 sector_t cylinders;
3125
3126 heads = 128;
3127 sectors = 32;
3128
3129 cylinders = capacity;
3130 sector_div(cylinders, (128 * 32));
3131
3132 /* return result */
3133 parm[0] = heads;
3134 parm[1] = sectors;
3135 parm[2] = cylinders;
3136
3137 return 0;
3138}
3139
3140/**
3141 * ipr_slave_destroy - Unconfigure a SCSI device
3142 * @sdev: scsi device struct
3143 *
3144 * Return value:
3145 * nothing
3146 **/
3147static void ipr_slave_destroy(struct scsi_device *sdev)
3148{
3149 struct ipr_resource_entry *res;
3150 struct ipr_ioa_cfg *ioa_cfg;
3151 unsigned long lock_flags = 0;
3152
3153 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3154
3155 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3156 res = (struct ipr_resource_entry *) sdev->hostdata;
3157 if (res) {
3158 sdev->hostdata = NULL;
3159 res->sdev = NULL;
3160 }
3161 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3162}
3163
3164/**
3165 * ipr_slave_configure - Configure a SCSI device
3166 * @sdev: scsi device struct
3167 *
3168 * This function configures the specified scsi device.
3169 *
3170 * Return value:
3171 * 0 on success
3172 **/
3173static int ipr_slave_configure(struct scsi_device *sdev)
3174{
3175 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3176 struct ipr_resource_entry *res;
3177 unsigned long lock_flags = 0;
3178
3179 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3180 res = sdev->hostdata;
3181 if (res) {
3182 if (ipr_is_af_dasd_device(res))
3183 sdev->type = TYPE_RAID;
0726ce26 3184 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
1da177e4 3185 sdev->scsi_level = 4;
0726ce26 3186 sdev->no_uld_attach = 1;
3187 }
1da177e4
LT
3188 if (ipr_is_vset_device(res)) {
3189 sdev->timeout = IPR_VSET_RW_TIMEOUT;
3190 blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
3191 }
e4fbf44e 3192 if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
1da177e4
LT
3193 sdev->allow_restart = 1;
3194 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
3195 }
3196 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3197 return 0;
3198}
3199
3200/**
3201 * ipr_slave_alloc - Prepare for commands to a device.
3202 * @sdev: scsi device struct
3203 *
3204 * This function saves a pointer to the resource entry
3205 * in the scsi device struct if the device exists. We
3206 * can then use this pointer in ipr_queuecommand when
3207 * handling new commands.
3208 *
3209 * Return value:
692aebfc 3210 * 0 on success / -ENXIO if device does not exist
1da177e4
LT
3211 **/
3212static int ipr_slave_alloc(struct scsi_device *sdev)
3213{
3214 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3215 struct ipr_resource_entry *res;
3216 unsigned long lock_flags;
692aebfc 3217 int rc = -ENXIO;
1da177e4
LT
3218
3219 sdev->hostdata = NULL;
3220
3221 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3222
3223 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3224 if ((res->cfgte.res_addr.bus == sdev->channel) &&
3225 (res->cfgte.res_addr.target == sdev->id) &&
3226 (res->cfgte.res_addr.lun == sdev->lun)) {
3227 res->sdev = sdev;
3228 res->add_to_ml = 0;
3229 res->in_erp = 0;
3230 sdev->hostdata = res;
ee0a90fa 3231 if (!ipr_is_naca_model(res))
3232 res->needs_sync_complete = 1;
692aebfc 3233 rc = 0;
1da177e4
LT
3234 break;
3235 }
3236 }
3237
3238 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3239
692aebfc 3240 return rc;
1da177e4
LT
3241}
3242
3243/**
3244 * ipr_eh_host_reset - Reset the host adapter
3245 * @scsi_cmd: scsi command struct
3246 *
3247 * Return value:
3248 * SUCCESS / FAILED
3249 **/
df0ae249 3250static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
1da177e4
LT
3251{
3252 struct ipr_ioa_cfg *ioa_cfg;
3253 int rc;
3254
3255 ENTER;
3256 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3257
3258 dev_err(&ioa_cfg->pdev->dev,
3259 "Adapter being reset as a result of error recovery.\n");
3260
3261 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3262 ioa_cfg->sdt_state = GET_DUMP;
3263
3264 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
3265
3266 LEAVE;
3267 return rc;
3268}
3269
df0ae249
JG
3270static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
3271{
3272 int rc;
3273
3274 spin_lock_irq(cmd->device->host->host_lock);
3275 rc = __ipr_eh_host_reset(cmd);
3276 spin_unlock_irq(cmd->device->host->host_lock);
3277
3278 return rc;
3279}
3280
c6513096
BK
3281/**
3282 * ipr_device_reset - Reset the device
3283 * @ioa_cfg: ioa config struct
3284 * @res: resource entry struct
3285 *
3286 * This function issues a device reset to the affected device.
3287 * If the device is a SCSI device, a LUN reset will be sent
3288 * to the device first. If that does not work, a target reset
3289 * will be sent.
3290 *
3291 * Return value:
3292 * 0 on success / non-zero on failure
3293 **/
3294static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
3295 struct ipr_resource_entry *res)
3296{
3297 struct ipr_cmnd *ipr_cmd;
3298 struct ipr_ioarcb *ioarcb;
3299 struct ipr_cmd_pkt *cmd_pkt;
3300 u32 ioasc;
3301
3302 ENTER;
3303 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3304 ioarcb = &ipr_cmd->ioarcb;
3305 cmd_pkt = &ioarcb->cmd_pkt;
3306
3307 ioarcb->res_handle = res->cfgte.res_handle;
3308 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3309 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3310
3311 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3312 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3313 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3314
3315 LEAVE;
3316 return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
3317}
3318
1da177e4
LT
3319/**
3320 * ipr_eh_dev_reset - Reset the device
3321 * @scsi_cmd: scsi command struct
3322 *
3323 * This function issues a device reset to the affected device.
3324 * A LUN reset will be sent to the device first. If that does
3325 * not work, a target reset will be sent.
3326 *
3327 * Return value:
3328 * SUCCESS / FAILED
3329 **/
94d0e7b8 3330static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
1da177e4
LT
3331{
3332 struct ipr_cmnd *ipr_cmd;
3333 struct ipr_ioa_cfg *ioa_cfg;
3334 struct ipr_resource_entry *res;
c6513096 3335 int rc;
1da177e4
LT
3336
3337 ENTER;
3338 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3339 res = scsi_cmd->device->hostdata;
3340
eeb88307 3341 if (!res)
1da177e4
LT
3342 return FAILED;
3343
3344 /*
3345 * If we are currently going through reset/reload, return failed. This will force the
3346 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
3347 * reset to complete
3348 */
3349 if (ioa_cfg->in_reset_reload)
3350 return FAILED;
3351 if (ioa_cfg->ioa_is_dead)
3352 return FAILED;
3353
3354 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3355 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3356 if (ipr_cmd->scsi_cmd)
3357 ipr_cmd->done = ipr_scsi_eh_done;
3358 }
3359 }
3360
3361 res->resetting_device = 1;
fb3ed3cb 3362 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
c6513096 3363 rc = ipr_device_reset(ioa_cfg, res);
1da177e4
LT
3364 res->resetting_device = 0;
3365
1da177e4 3366 LEAVE;
c6513096 3367 return (rc ? FAILED : SUCCESS);
1da177e4
LT
3368}
3369
94d0e7b8
JG
3370static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
3371{
3372 int rc;
3373
3374 spin_lock_irq(cmd->device->host->host_lock);
3375 rc = __ipr_eh_dev_reset(cmd);
3376 spin_unlock_irq(cmd->device->host->host_lock);
3377
3378 return rc;
3379}
3380
1da177e4
LT
3381/**
3382 * ipr_bus_reset_done - Op done function for bus reset.
3383 * @ipr_cmd: ipr command struct
3384 *
3385 * This function is the op done function for a bus reset
3386 *
3387 * Return value:
3388 * none
3389 **/
3390static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3391{
3392 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3393 struct ipr_resource_entry *res;
3394
3395 ENTER;
3396 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3397 if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3398 sizeof(res->cfgte.res_handle))) {
3399 scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3400 break;
3401 }
3402 }
3403
3404 /*
3405 * If abort has not completed, indicate the reset has, else call the
3406 * abort's done function to wake the sleeping eh thread
3407 */
3408 if (ipr_cmd->sibling->sibling)
3409 ipr_cmd->sibling->sibling = NULL;
3410 else
3411 ipr_cmd->sibling->done(ipr_cmd->sibling);
3412
3413 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3414 LEAVE;
3415}
3416
3417/**
3418 * ipr_abort_timeout - An abort task has timed out
3419 * @ipr_cmd: ipr command struct
3420 *
3421 * This function handles when an abort task times out. If this
3422 * happens we issue a bus reset since we have resources tied
3423 * up that must be freed before returning to the midlayer.
3424 *
3425 * Return value:
3426 * none
3427 **/
3428static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3429{
3430 struct ipr_cmnd *reset_cmd;
3431 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3432 struct ipr_cmd_pkt *cmd_pkt;
3433 unsigned long lock_flags = 0;
3434
3435 ENTER;
3436 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3437 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
3438 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3439 return;
3440 }
3441
fb3ed3cb 3442 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
1da177e4
LT
3443 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3444 ipr_cmd->sibling = reset_cmd;
3445 reset_cmd->sibling = ipr_cmd;
3446 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
3447 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
3448 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3449 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3450 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
3451
3452 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3453 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3454 LEAVE;
3455}
3456
3457/**
3458 * ipr_cancel_op - Cancel specified op
3459 * @scsi_cmd: scsi command struct
3460 *
3461 * This function cancels specified op.
3462 *
3463 * Return value:
3464 * SUCCESS / FAILED
3465 **/
3466static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
3467{
3468 struct ipr_cmnd *ipr_cmd;
3469 struct ipr_ioa_cfg *ioa_cfg;
3470 struct ipr_resource_entry *res;
3471 struct ipr_cmd_pkt *cmd_pkt;
3472 u32 ioasc;
3473 int op_found = 0;
3474
3475 ENTER;
3476 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3477 res = scsi_cmd->device->hostdata;
3478
8fa728a2
JG
3479 /* If we are currently going through reset/reload, return failed.
3480 * This will force the mid-layer to call ipr_eh_host_reset,
3481 * which will then go to sleep and wait for the reset to complete
3482 */
3483 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
3484 return FAILED;
1da177e4
LT
3485 if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
3486 return FAILED;
3487
3488 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3489 if (ipr_cmd->scsi_cmd == scsi_cmd) {
3490 ipr_cmd->done = ipr_scsi_eh_done;
3491 op_found = 1;
3492 break;
3493 }
3494 }
3495
3496 if (!op_found)
3497 return SUCCESS;
3498
3499 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3500 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3501 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3502 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3503 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3504 ipr_cmd->u.sdev = scsi_cmd->device;
3505
fb3ed3cb
BK
3506 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
3507 scsi_cmd->cmnd[0]);
1da177e4
LT
3508 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
3509 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3510
3511 /*
3512 * If the abort task timed out and we sent a bus reset, we will get
3513 * one the following responses to the abort
3514 */
3515 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
3516 ioasc = 0;
3517 ipr_trace;
3518 }
3519
3520 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
ee0a90fa 3521 if (!ipr_is_naca_model(res))
3522 res->needs_sync_complete = 1;
1da177e4
LT
3523
3524 LEAVE;
3525 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3526}
3527
3528/**
3529 * ipr_eh_abort - Abort a single op
3530 * @scsi_cmd: scsi command struct
3531 *
3532 * Return value:
3533 * SUCCESS / FAILED
3534 **/
3535static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
3536{
8fa728a2
JG
3537 unsigned long flags;
3538 int rc;
1da177e4
LT
3539
3540 ENTER;
1da177e4 3541
8fa728a2
JG
3542 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
3543 rc = ipr_cancel_op(scsi_cmd);
3544 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
1da177e4
LT
3545
3546 LEAVE;
8fa728a2 3547 return rc;
1da177e4
LT
3548}
3549
3550/**
3551 * ipr_handle_other_interrupt - Handle "other" interrupts
3552 * @ioa_cfg: ioa config struct
3553 * @int_reg: interrupt register
3554 *
3555 * Return value:
3556 * IRQ_NONE / IRQ_HANDLED
3557 **/
3558static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
3559 volatile u32 int_reg)
3560{
3561 irqreturn_t rc = IRQ_HANDLED;
3562
3563 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
3564 /* Mask the interrupt */
3565 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
3566
3567 /* Clear the interrupt */
3568 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
3569 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
3570
3571 list_del(&ioa_cfg->reset_cmd->queue);
3572 del_timer(&ioa_cfg->reset_cmd->timer);
3573 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
3574 } else {
3575 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
3576 ioa_cfg->ioa_unit_checked = 1;
3577 else
3578 dev_err(&ioa_cfg->pdev->dev,
3579 "Permanent IOA failure. 0x%08X\n", int_reg);
3580
3581 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3582 ioa_cfg->sdt_state = GET_DUMP;
3583
3584 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
3585 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3586 }
3587
3588 return rc;
3589}
3590
3591/**
3592 * ipr_isr - Interrupt service routine
3593 * @irq: irq number
3594 * @devp: pointer to ioa config struct
3595 * @regs: pt_regs struct
3596 *
3597 * Return value:
3598 * IRQ_NONE / IRQ_HANDLED
3599 **/
3600static irqreturn_t ipr_isr(int irq, void *devp, struct pt_regs *regs)
3601{
3602 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
3603 unsigned long lock_flags = 0;
3604 volatile u32 int_reg, int_mask_reg;
3605 u32 ioasc;
3606 u16 cmd_index;
3607 struct ipr_cmnd *ipr_cmd;
3608 irqreturn_t rc = IRQ_NONE;
3609
3610 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3611
3612 /* If interrupts are disabled, ignore the interrupt */
3613 if (!ioa_cfg->allow_interrupts) {
3614 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3615 return IRQ_NONE;
3616 }
3617
3618 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
3619 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3620
3621 /* If an interrupt on the adapter did not occur, ignore it */
3622 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
3623 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3624 return IRQ_NONE;
3625 }
3626
3627 while (1) {
3628 ipr_cmd = NULL;
3629
3630 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
3631 ioa_cfg->toggle_bit) {
3632
3633 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
3634 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
3635
3636 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
3637 ioa_cfg->errors_logged++;
3638 dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
3639
3640 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3641 ioa_cfg->sdt_state = GET_DUMP;
3642
3643 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3644 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3645 return IRQ_HANDLED;
3646 }
3647
3648 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
3649
3650 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3651
3652 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
3653
3654 list_del(&ipr_cmd->queue);
3655 del_timer(&ipr_cmd->timer);
3656 ipr_cmd->done(ipr_cmd);
3657
3658 rc = IRQ_HANDLED;
3659
3660 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
3661 ioa_cfg->hrrq_curr++;
3662 } else {
3663 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
3664 ioa_cfg->toggle_bit ^= 1u;
3665 }
3666 }
3667
3668 if (ipr_cmd != NULL) {
3669 /* Clear the PCI interrupt */
3670 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
3671 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3672 } else
3673 break;
3674 }
3675
3676 if (unlikely(rc == IRQ_NONE))
3677 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
3678
3679 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3680 return rc;
3681}
3682
3683/**
3684 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
3685 * @ioa_cfg: ioa config struct
3686 * @ipr_cmd: ipr command struct
3687 *
3688 * Return value:
3689 * 0 on success / -1 on failure
3690 **/
3691static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
3692 struct ipr_cmnd *ipr_cmd)
3693{
3694 int i;
3695 struct scatterlist *sglist;
3696 u32 length;
3697 u32 ioadl_flags = 0;
3698 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3699 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3700 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
3701
3702 length = scsi_cmd->request_bufflen;
3703
3704 if (length == 0)
3705 return 0;
3706
3707 if (scsi_cmd->use_sg) {
3708 ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
3709 scsi_cmd->request_buffer,
3710 scsi_cmd->use_sg,
3711 scsi_cmd->sc_data_direction);
3712
3713 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3714 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3715 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3716 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3717 ioarcb->write_ioadl_len =
3718 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3719 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3720 ioadl_flags = IPR_IOADL_FLAGS_READ;
3721 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3722 ioarcb->read_ioadl_len =
3723 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3724 }
3725
3726 sglist = scsi_cmd->request_buffer;
3727
3728 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3729 ioadl[i].flags_and_data_len =
3730 cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
3731 ioadl[i].address =
3732 cpu_to_be32(sg_dma_address(&sglist[i]));
3733 }
3734
3735 if (likely(ipr_cmd->dma_use_sg)) {
3736 ioadl[i-1].flags_and_data_len |=
3737 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3738 return 0;
3739 } else
3740 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
3741 } else {
3742 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3743 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3744 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3745 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3746 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3747 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3748 ioadl_flags = IPR_IOADL_FLAGS_READ;
3749 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3750 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3751 }
3752
3753 ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
3754 scsi_cmd->request_buffer, length,
3755 scsi_cmd->sc_data_direction);
3756
3757 if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
3758 ipr_cmd->dma_use_sg = 1;
3759 ioadl[0].flags_and_data_len =
3760 cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
3761 ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
3762 return 0;
3763 } else
3764 dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
3765 }
3766
3767 return -1;
3768}
3769
3770/**
3771 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
3772 * @scsi_cmd: scsi command struct
3773 *
3774 * Return value:
3775 * task attributes
3776 **/
3777static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
3778{
3779 u8 tag[2];
3780 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
3781
3782 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
3783 switch (tag[0]) {
3784 case MSG_SIMPLE_TAG:
3785 rc = IPR_FLAGS_LO_SIMPLE_TASK;
3786 break;
3787 case MSG_HEAD_TAG:
3788 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
3789 break;
3790 case MSG_ORDERED_TAG:
3791 rc = IPR_FLAGS_LO_ORDERED_TASK;
3792 break;
3793 };
3794 }
3795
3796 return rc;
3797}
3798
3799/**
3800 * ipr_erp_done - Process completion of ERP for a device
3801 * @ipr_cmd: ipr command struct
3802 *
3803 * This function copies the sense buffer into the scsi_cmd
3804 * struct and pushes the scsi_done function.
3805 *
3806 * Return value:
3807 * nothing
3808 **/
3809static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
3810{
3811 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3812 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3813 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3814 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3815
3816 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3817 scsi_cmd->result |= (DID_ERROR << 16);
fb3ed3cb
BK
3818 scmd_printk(KERN_ERR, scsi_cmd,
3819 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
1da177e4
LT
3820 } else {
3821 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
3822 SCSI_SENSE_BUFFERSIZE);
3823 }
3824
3825 if (res) {
ee0a90fa 3826 if (!ipr_is_naca_model(res))
3827 res->needs_sync_complete = 1;
1da177e4
LT
3828 res->in_erp = 0;
3829 }
3830 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3831 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3832 scsi_cmd->scsi_done(scsi_cmd);
3833}
3834
3835/**
3836 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
3837 * @ipr_cmd: ipr command struct
3838 *
3839 * Return value:
3840 * none
3841 **/
3842static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
3843{
3844 struct ipr_ioarcb *ioarcb;
3845 struct ipr_ioasa *ioasa;
3846
3847 ioarcb = &ipr_cmd->ioarcb;
3848 ioasa = &ipr_cmd->ioasa;
3849
3850 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
3851 ioarcb->write_data_transfer_length = 0;
3852 ioarcb->read_data_transfer_length = 0;
3853 ioarcb->write_ioadl_len = 0;
3854 ioarcb->read_ioadl_len = 0;
3855 ioasa->ioasc = 0;
3856 ioasa->residual_data_len = 0;
3857}
3858
3859/**
3860 * ipr_erp_request_sense - Send request sense to a device
3861 * @ipr_cmd: ipr command struct
3862 *
3863 * This function sends a request sense to a device as a result
3864 * of a check condition.
3865 *
3866 * Return value:
3867 * nothing
3868 **/
3869static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
3870{
3871 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3872 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3873
3874 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3875 ipr_erp_done(ipr_cmd);
3876 return;
3877 }
3878
3879 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3880
3881 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
3882 cmd_pkt->cdb[0] = REQUEST_SENSE;
3883 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
3884 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
3885 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3886 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
3887
3888 ipr_cmd->ioadl[0].flags_and_data_len =
3889 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
3890 ipr_cmd->ioadl[0].address =
3891 cpu_to_be32(ipr_cmd->sense_buffer_dma);
3892
3893 ipr_cmd->ioarcb.read_ioadl_len =
3894 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3895 ipr_cmd->ioarcb.read_data_transfer_length =
3896 cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
3897
3898 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
3899 IPR_REQUEST_SENSE_TIMEOUT * 2);
3900}
3901
3902/**
3903 * ipr_erp_cancel_all - Send cancel all to a device
3904 * @ipr_cmd: ipr command struct
3905 *
3906 * This function sends a cancel all to a device to clear the
3907 * queue. If we are running TCQ on the device, QERR is set to 1,
3908 * which means all outstanding ops have been dropped on the floor.
3909 * Cancel all will return them to us.
3910 *
3911 * Return value:
3912 * nothing
3913 **/
3914static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
3915{
3916 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3917 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3918 struct ipr_cmd_pkt *cmd_pkt;
3919
3920 res->in_erp = 1;
3921
3922 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3923
3924 if (!scsi_get_tag_type(scsi_cmd->device)) {
3925 ipr_erp_request_sense(ipr_cmd);
3926 return;
3927 }
3928
3929 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3930 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3931 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3932
3933 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
3934 IPR_CANCEL_ALL_TIMEOUT);
3935}
3936
3937/**
3938 * ipr_dump_ioasa - Dump contents of IOASA
3939 * @ioa_cfg: ioa config struct
3940 * @ipr_cmd: ipr command struct
fe964d0a 3941 * @res: resource entry struct
1da177e4
LT
3942 *
3943 * This function is invoked by the interrupt handler when ops
3944 * fail. It will log the IOASA if appropriate. Only called
3945 * for GPDD ops.
3946 *
3947 * Return value:
3948 * none
3949 **/
3950static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
fe964d0a 3951 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
1da177e4
LT
3952{
3953 int i;
3954 u16 data_len;
3955 u32 ioasc;
3956 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3957 __be32 *ioasa_data = (__be32 *)ioasa;
3958 int error_index;
3959
3960 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
3961
3962 if (0 == ioasc)
3963 return;
3964
3965 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
3966 return;
3967
3968 error_index = ipr_get_error(ioasc);
3969
3970 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
3971 /* Don't log an error if the IOA already logged one */
3972 if (ioasa->ilid != 0)
3973 return;
3974
3975 if (ipr_error_table[error_index].log_ioasa == 0)
3976 return;
3977 }
3978
fe964d0a 3979 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
1da177e4
LT
3980
3981 if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
3982 data_len = sizeof(struct ipr_ioasa);
3983 else
3984 data_len = be16_to_cpu(ioasa->ret_stat_len);
3985
3986 ipr_err("IOASA Dump:\n");
3987
3988 for (i = 0; i < data_len / 4; i += 4) {
3989 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
3990 be32_to_cpu(ioasa_data[i]),
3991 be32_to_cpu(ioasa_data[i+1]),
3992 be32_to_cpu(ioasa_data[i+2]),
3993 be32_to_cpu(ioasa_data[i+3]));
3994 }
3995}
3996
3997/**
3998 * ipr_gen_sense - Generate SCSI sense data from an IOASA
3999 * @ioasa: IOASA
4000 * @sense_buf: sense data buffer
4001 *
4002 * Return value:
4003 * none
4004 **/
4005static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
4006{
4007 u32 failing_lba;
4008 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
4009 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
4010 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4011 u32 ioasc = be32_to_cpu(ioasa->ioasc);
4012
4013 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
4014
4015 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
4016 return;
4017
4018 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
4019
4020 if (ipr_is_vset_device(res) &&
4021 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
4022 ioasa->u.vset.failing_lba_hi != 0) {
4023 sense_buf[0] = 0x72;
4024 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
4025 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
4026 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
4027
4028 sense_buf[7] = 12;
4029 sense_buf[8] = 0;
4030 sense_buf[9] = 0x0A;
4031 sense_buf[10] = 0x80;
4032
4033 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
4034
4035 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
4036 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
4037 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
4038 sense_buf[15] = failing_lba & 0x000000ff;
4039
4040 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4041
4042 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
4043 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
4044 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
4045 sense_buf[19] = failing_lba & 0x000000ff;
4046 } else {
4047 sense_buf[0] = 0x70;
4048 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
4049 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
4050 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
4051
4052 /* Illegal request */
4053 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
4054 (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
4055 sense_buf[7] = 10; /* additional length */
4056
4057 /* IOARCB was in error */
4058 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
4059 sense_buf[15] = 0xC0;
4060 else /* Parameter data was invalid */
4061 sense_buf[15] = 0x80;
4062
4063 sense_buf[16] =
4064 ((IPR_FIELD_POINTER_MASK &
4065 be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
4066 sense_buf[17] =
4067 (IPR_FIELD_POINTER_MASK &
4068 be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
4069 } else {
4070 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
4071 if (ipr_is_vset_device(res))
4072 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4073 else
4074 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
4075
4076 sense_buf[0] |= 0x80; /* Or in the Valid bit */
4077 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
4078 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
4079 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
4080 sense_buf[6] = failing_lba & 0x000000ff;
4081 }
4082
4083 sense_buf[7] = 6; /* additional length */
4084 }
4085 }
4086}
4087
ee0a90fa 4088/**
4089 * ipr_get_autosense - Copy autosense data to sense buffer
4090 * @ipr_cmd: ipr command struct
4091 *
4092 * This function copies the autosense buffer to the buffer
4093 * in the scsi_cmd, if there is autosense available.
4094 *
4095 * Return value:
4096 * 1 if autosense was available / 0 if not
4097 **/
4098static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
4099{
4100 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4101
4102 if ((be32_to_cpu(ioasa->ioasc_specific) &
4103 (IPR_ADDITIONAL_STATUS_FMT | IPR_AUTOSENSE_VALID)) == 0)
4104 return 0;
4105
4106 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
4107 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
4108 SCSI_SENSE_BUFFERSIZE));
4109 return 1;
4110}
4111
1da177e4
LT
4112/**
4113 * ipr_erp_start - Process an error response for a SCSI op
4114 * @ioa_cfg: ioa config struct
4115 * @ipr_cmd: ipr command struct
4116 *
4117 * This function determines whether or not to initiate ERP
4118 * on the affected device.
4119 *
4120 * Return value:
4121 * nothing
4122 **/
4123static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4124 struct ipr_cmnd *ipr_cmd)
4125{
4126 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4127 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4128 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4129
4130 if (!res) {
4131 ipr_scsi_eh_done(ipr_cmd);
4132 return;
4133 }
4134
4135 if (ipr_is_gscsi(res))
fe964d0a 4136 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
1da177e4
LT
4137 else
4138 ipr_gen_sense(ipr_cmd);
4139
4140 switch (ioasc & IPR_IOASC_IOASC_MASK) {
4141 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
ee0a90fa 4142 if (ipr_is_naca_model(res))
4143 scsi_cmd->result |= (DID_ABORT << 16);
4144 else
4145 scsi_cmd->result |= (DID_IMM_RETRY << 16);
1da177e4
LT
4146 break;
4147 case IPR_IOASC_IR_RESOURCE_HANDLE:
b0df54bb 4148 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
1da177e4
LT
4149 scsi_cmd->result |= (DID_NO_CONNECT << 16);
4150 break;
4151 case IPR_IOASC_HW_SEL_TIMEOUT:
4152 scsi_cmd->result |= (DID_NO_CONNECT << 16);
ee0a90fa 4153 if (!ipr_is_naca_model(res))
4154 res->needs_sync_complete = 1;
1da177e4
LT
4155 break;
4156 case IPR_IOASC_SYNC_REQUIRED:
4157 if (!res->in_erp)
4158 res->needs_sync_complete = 1;
4159 scsi_cmd->result |= (DID_IMM_RETRY << 16);
4160 break;
4161 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
b0df54bb 4162 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
1da177e4
LT
4163 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
4164 break;
4165 case IPR_IOASC_BUS_WAS_RESET:
4166 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
4167 /*
4168 * Report the bus reset and ask for a retry. The device
4169 * will give CC/UA the next command.
4170 */
4171 if (!res->resetting_device)
4172 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
4173 scsi_cmd->result |= (DID_ERROR << 16);
ee0a90fa 4174 if (!ipr_is_naca_model(res))
4175 res->needs_sync_complete = 1;
1da177e4
LT
4176 break;
4177 case IPR_IOASC_HW_DEV_BUS_STATUS:
4178 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
4179 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
ee0a90fa 4180 if (!ipr_get_autosense(ipr_cmd)) {
4181 if (!ipr_is_naca_model(res)) {
4182 ipr_erp_cancel_all(ipr_cmd);
4183 return;
4184 }
4185 }
1da177e4 4186 }
ee0a90fa 4187 if (!ipr_is_naca_model(res))
4188 res->needs_sync_complete = 1;
1da177e4
LT
4189 break;
4190 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
4191 break;
4192 default:
4193 scsi_cmd->result |= (DID_ERROR << 16);
ee0a90fa 4194 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
1da177e4
LT
4195 res->needs_sync_complete = 1;
4196 break;
4197 }
4198
4199 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4200 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4201 scsi_cmd->scsi_done(scsi_cmd);
4202}
4203
4204/**
4205 * ipr_scsi_done - mid-layer done function
4206 * @ipr_cmd: ipr command struct
4207 *
4208 * This function is invoked by the interrupt handler for
4209 * ops generated by the SCSI mid-layer
4210 *
4211 * Return value:
4212 * none
4213 **/
4214static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
4215{
4216 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4217 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4218 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4219
4220 scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
4221
4222 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
4223 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4224 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4225 scsi_cmd->scsi_done(scsi_cmd);
4226 } else
4227 ipr_erp_start(ioa_cfg, ipr_cmd);
4228}
4229
1da177e4
LT
4230/**
4231 * ipr_queuecommand - Queue a mid-layer request
4232 * @scsi_cmd: scsi command struct
4233 * @done: done function
4234 *
4235 * This function queues a request generated by the mid-layer.
4236 *
4237 * Return value:
4238 * 0 on success
4239 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
4240 * SCSI_MLQUEUE_HOST_BUSY if host is busy
4241 **/
4242static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4243 void (*done) (struct scsi_cmnd *))
4244{
4245 struct ipr_ioa_cfg *ioa_cfg;
4246 struct ipr_resource_entry *res;
4247 struct ipr_ioarcb *ioarcb;
4248 struct ipr_cmnd *ipr_cmd;
4249 int rc = 0;
4250
4251 scsi_cmd->scsi_done = done;
4252 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4253 res = scsi_cmd->device->hostdata;
4254 scsi_cmd->result = (DID_OK << 16);
4255
4256 /*
4257 * We are currently blocking all devices due to a host reset
4258 * We have told the host to stop giving us new requests, but
4259 * ERP ops don't count. FIXME
4260 */
4261 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
4262 return SCSI_MLQUEUE_HOST_BUSY;
4263
4264 /*
4265 * FIXME - Create scsi_set_host_offline interface
4266 * and the ioa_is_dead check can be removed
4267 */
4268 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
4269 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
4270 scsi_cmd->result = (DID_NO_CONNECT << 16);
4271 scsi_cmd->scsi_done(scsi_cmd);
4272 return 0;
4273 }
4274
4275 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4276 ioarcb = &ipr_cmd->ioarcb;
4277 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4278
4279 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
4280 ipr_cmd->scsi_cmd = scsi_cmd;
4281 ioarcb->res_handle = res->cfgte.res_handle;
4282 ipr_cmd->done = ipr_scsi_done;
4283 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
4284
4285 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
4286 if (scsi_cmd->underflow == 0)
4287 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4288
4289 if (res->needs_sync_complete) {
4290 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
4291 res->needs_sync_complete = 0;
4292 }
4293
4294 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
4295 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
4296 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
4297 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
4298 }
4299
4300 if (scsi_cmd->cmnd[0] >= 0xC0 &&
4301 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
4302 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4303
1da177e4
LT
4304 if (likely(rc == 0))
4305 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
4306
4307 if (likely(rc == 0)) {
4308 mb();
4309 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
4310 ioa_cfg->regs.ioarrin_reg);
4311 } else {
4312 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4313 return SCSI_MLQUEUE_HOST_BUSY;
4314 }
4315
4316 return 0;
4317}
4318
4319/**
4320 * ipr_info - Get information about the card/driver
4321 * @scsi_host: scsi host struct
4322 *
4323 * Return value:
4324 * pointer to buffer with description string
4325 **/
4326static const char * ipr_ioa_info(struct Scsi_Host *host)
4327{
4328 static char buffer[512];
4329 struct ipr_ioa_cfg *ioa_cfg;
4330 unsigned long lock_flags = 0;
4331
4332 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
4333
4334 spin_lock_irqsave(host->host_lock, lock_flags);
4335 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
4336 spin_unlock_irqrestore(host->host_lock, lock_flags);
4337
4338 return buffer;
4339}
4340
4341static struct scsi_host_template driver_template = {
4342 .module = THIS_MODULE,
4343 .name = "IPR",
4344 .info = ipr_ioa_info,
4345 .queuecommand = ipr_queuecommand,
4346 .eh_abort_handler = ipr_eh_abort,
4347 .eh_device_reset_handler = ipr_eh_dev_reset,
4348 .eh_host_reset_handler = ipr_eh_host_reset,
4349 .slave_alloc = ipr_slave_alloc,
4350 .slave_configure = ipr_slave_configure,
4351 .slave_destroy = ipr_slave_destroy,
4352 .change_queue_depth = ipr_change_queue_depth,
4353 .change_queue_type = ipr_change_queue_type,
4354 .bios_param = ipr_biosparam,
4355 .can_queue = IPR_MAX_COMMANDS,
4356 .this_id = -1,
4357 .sg_tablesize = IPR_MAX_SGLIST,
4358 .max_sectors = IPR_IOA_MAX_SECTORS,
4359 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
4360 .use_clustering = ENABLE_CLUSTERING,
4361 .shost_attrs = ipr_ioa_attrs,
4362 .sdev_attrs = ipr_dev_attrs,
4363 .proc_name = IPR_NAME
4364};
4365
4366#ifdef CONFIG_PPC_PSERIES
4367static const u16 ipr_blocked_processors[] = {
4368 PV_NORTHSTAR,
4369 PV_PULSAR,
4370 PV_POWER4,
4371 PV_ICESTAR,
4372 PV_SSTAR,
4373 PV_POWER4p,
4374 PV_630,
4375 PV_630p
4376};
4377
4378/**
4379 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
4380 * @ioa_cfg: ioa cfg struct
4381 *
4382 * Adapters that use Gemstone revision < 3.1 do not work reliably on
4383 * certain pSeries hardware. This function determines if the given
4384 * adapter is in one of these confgurations or not.
4385 *
4386 * Return value:
4387 * 1 if adapter is not supported / 0 if adapter is supported
4388 **/
4389static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
4390{
4391 u8 rev_id;
4392 int i;
4393
4394 if (ioa_cfg->type == 0x5702) {
4395 if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
4396 &rev_id) == PCIBIOS_SUCCESSFUL) {
4397 if (rev_id < 4) {
4398 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
4399 if (__is_processor(ipr_blocked_processors[i]))
4400 return 1;
4401 }
4402 }
4403 }
4404 }
4405 return 0;
4406}
4407#else
4408#define ipr_invalid_adapter(ioa_cfg) 0
4409#endif
4410
4411/**
4412 * ipr_ioa_bringdown_done - IOA bring down completion.
4413 * @ipr_cmd: ipr command struct
4414 *
4415 * This function processes the completion of an adapter bring down.
4416 * It wakes any reset sleepers.
4417 *
4418 * Return value:
4419 * IPR_RC_JOB_RETURN
4420 **/
4421static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
4422{
4423 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4424
4425 ENTER;
4426 ioa_cfg->in_reset_reload = 0;
4427 ioa_cfg->reset_retries = 0;
4428 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4429 wake_up_all(&ioa_cfg->reset_wait_q);
4430
4431 spin_unlock_irq(ioa_cfg->host->host_lock);
4432 scsi_unblock_requests(ioa_cfg->host);
4433 spin_lock_irq(ioa_cfg->host->host_lock);
4434 LEAVE;
4435
4436 return IPR_RC_JOB_RETURN;
4437}
4438
4439/**
4440 * ipr_ioa_reset_done - IOA reset completion.
4441 * @ipr_cmd: ipr command struct
4442 *
4443 * This function processes the completion of an adapter reset.
4444 * It schedules any necessary mid-layer add/removes and
4445 * wakes any reset sleepers.
4446 *
4447 * Return value:
4448 * IPR_RC_JOB_RETURN
4449 **/
4450static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
4451{
4452 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4453 struct ipr_resource_entry *res;
4454 struct ipr_hostrcb *hostrcb, *temp;
4455 int i = 0;
4456
4457 ENTER;
4458 ioa_cfg->in_reset_reload = 0;
4459 ioa_cfg->allow_cmds = 1;
4460 ioa_cfg->reset_cmd = NULL;
3d1d0da6 4461 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
1da177e4
LT
4462
4463 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4464 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
4465 ipr_trace;
4466 break;
4467 }
4468 }
4469 schedule_work(&ioa_cfg->work_q);
4470
4471 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
4472 list_del(&hostrcb->queue);
4473 if (i++ < IPR_NUM_LOG_HCAMS)
4474 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
4475 else
4476 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
4477 }
4478
4479 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
4480
4481 ioa_cfg->reset_retries = 0;
4482 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4483 wake_up_all(&ioa_cfg->reset_wait_q);
4484
4485 spin_unlock_irq(ioa_cfg->host->host_lock);
4486 scsi_unblock_requests(ioa_cfg->host);
4487 spin_lock_irq(ioa_cfg->host->host_lock);
4488
4489 if (!ioa_cfg->allow_cmds)
4490 scsi_block_requests(ioa_cfg->host);
4491
4492 LEAVE;
4493 return IPR_RC_JOB_RETURN;
4494}
4495
4496/**
4497 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
4498 * @supported_dev: supported device struct
4499 * @vpids: vendor product id struct
4500 *
4501 * Return value:
4502 * none
4503 **/
4504static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
4505 struct ipr_std_inq_vpids *vpids)
4506{
4507 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
4508 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
4509 supported_dev->num_records = 1;
4510 supported_dev->data_length =
4511 cpu_to_be16(sizeof(struct ipr_supported_device));
4512 supported_dev->reserved = 0;
4513}
4514
4515/**
4516 * ipr_set_supported_devs - Send Set Supported Devices for a device
4517 * @ipr_cmd: ipr command struct
4518 *
4519 * This function send a Set Supported Devices to the adapter
4520 *
4521 * Return value:
4522 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4523 **/
4524static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
4525{
4526 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4527 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
4528 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4529 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4530 struct ipr_resource_entry *res = ipr_cmd->u.res;
4531
4532 ipr_cmd->job_step = ipr_ioa_reset_done;
4533
4534 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
e4fbf44e 4535 if (!ipr_is_scsi_disk(res))
1da177e4
LT
4536 continue;
4537
4538 ipr_cmd->u.res = res;
4539 ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
4540
4541 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4542 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4543 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4544
4545 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
4546 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
4547 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
4548
4549 ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
4550 sizeof(struct ipr_supported_device));
4551 ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
4552 offsetof(struct ipr_misc_cbs, supp_dev));
4553 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4554 ioarcb->write_data_transfer_length =
4555 cpu_to_be32(sizeof(struct ipr_supported_device));
4556
4557 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
4558 IPR_SET_SUP_DEVICE_TIMEOUT);
4559
4560 ipr_cmd->job_step = ipr_set_supported_devs;
4561 return IPR_RC_JOB_RETURN;
4562 }
4563
4564 return IPR_RC_JOB_CONTINUE;
4565}
4566
62275040 4567/**
4568 * ipr_setup_write_cache - Disable write cache if needed
4569 * @ipr_cmd: ipr command struct
4570 *
4571 * This function sets up adapters write cache to desired setting
4572 *
4573 * Return value:
4574 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4575 **/
4576static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
4577{
4578 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4579
4580 ipr_cmd->job_step = ipr_set_supported_devs;
4581 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
4582 struct ipr_resource_entry, queue);
4583
4584 if (ioa_cfg->cache_state != CACHE_DISABLED)
4585 return IPR_RC_JOB_CONTINUE;
4586
4587 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4588 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4589 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
4590 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
4591
4592 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4593
4594 return IPR_RC_JOB_RETURN;
4595}
4596
1da177e4
LT
4597/**
4598 * ipr_get_mode_page - Locate specified mode page
4599 * @mode_pages: mode page buffer
4600 * @page_code: page code to find
4601 * @len: minimum required length for mode page
4602 *
4603 * Return value:
4604 * pointer to mode page / NULL on failure
4605 **/
4606static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
4607 u32 page_code, u32 len)
4608{
4609 struct ipr_mode_page_hdr *mode_hdr;
4610 u32 page_length;
4611 u32 length;
4612
4613 if (!mode_pages || (mode_pages->hdr.length == 0))
4614 return NULL;
4615
4616 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
4617 mode_hdr = (struct ipr_mode_page_hdr *)
4618 (mode_pages->data + mode_pages->hdr.block_desc_len);
4619
4620 while (length) {
4621 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
4622 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
4623 return mode_hdr;
4624 break;
4625 } else {
4626 page_length = (sizeof(struct ipr_mode_page_hdr) +
4627 mode_hdr->page_length);
4628 length -= page_length;
4629 mode_hdr = (struct ipr_mode_page_hdr *)
4630 ((unsigned long)mode_hdr + page_length);
4631 }
4632 }
4633 return NULL;
4634}
4635
4636/**
4637 * ipr_check_term_power - Check for term power errors
4638 * @ioa_cfg: ioa config struct
4639 * @mode_pages: IOAFP mode pages buffer
4640 *
4641 * Check the IOAFP's mode page 28 for term power errors
4642 *
4643 * Return value:
4644 * nothing
4645 **/
4646static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
4647 struct ipr_mode_pages *mode_pages)
4648{
4649 int i;
4650 int entry_length;
4651 struct ipr_dev_bus_entry *bus;
4652 struct ipr_mode_page28 *mode_page;
4653
4654 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4655 sizeof(struct ipr_mode_page28));
4656
4657 entry_length = mode_page->entry_length;
4658
4659 bus = mode_page->bus;
4660
4661 for (i = 0; i < mode_page->num_entries; i++) {
4662 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
4663 dev_err(&ioa_cfg->pdev->dev,
4664 "Term power is absent on scsi bus %d\n",
4665 bus->res_addr.bus);
4666 }
4667
4668 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
4669 }
4670}
4671
4672/**
4673 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
4674 * @ioa_cfg: ioa config struct
4675 *
4676 * Looks through the config table checking for SES devices. If
4677 * the SES device is in the SES table indicating a maximum SCSI
4678 * bus speed, the speed is limited for the bus.
4679 *
4680 * Return value:
4681 * none
4682 **/
4683static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
4684{
4685 u32 max_xfer_rate;
4686 int i;
4687
4688 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
4689 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
4690 ioa_cfg->bus_attr[i].bus_width);
4691
4692 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
4693 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
4694 }
4695}
4696
4697/**
4698 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
4699 * @ioa_cfg: ioa config struct
4700 * @mode_pages: mode page 28 buffer
4701 *
4702 * Updates mode page 28 based on driver configuration
4703 *
4704 * Return value:
4705 * none
4706 **/
4707static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
4708 struct ipr_mode_pages *mode_pages)
4709{
4710 int i, entry_length;
4711 struct ipr_dev_bus_entry *bus;
4712 struct ipr_bus_attributes *bus_attr;
4713 struct ipr_mode_page28 *mode_page;
4714
4715 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4716 sizeof(struct ipr_mode_page28));
4717
4718 entry_length = mode_page->entry_length;
4719
4720 /* Loop for each device bus entry */
4721 for (i = 0, bus = mode_page->bus;
4722 i < mode_page->num_entries;
4723 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
4724 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
4725 dev_err(&ioa_cfg->pdev->dev,
4726 "Invalid resource address reported: 0x%08X\n",
4727 IPR_GET_PHYS_LOC(bus->res_addr));
4728 continue;
4729 }
4730
4731 bus_attr = &ioa_cfg->bus_attr[i];
4732 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
4733 bus->bus_width = bus_attr->bus_width;
4734 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
4735 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
4736 if (bus_attr->qas_enabled)
4737 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
4738 else
4739 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
4740 }
4741}
4742
4743/**
4744 * ipr_build_mode_select - Build a mode select command
4745 * @ipr_cmd: ipr command struct
4746 * @res_handle: resource handle to send command to
4747 * @parm: Byte 2 of Mode Sense command
4748 * @dma_addr: DMA buffer address
4749 * @xfer_len: data transfer length
4750 *
4751 * Return value:
4752 * none
4753 **/
4754static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
4755 __be32 res_handle, u8 parm, u32 dma_addr,
4756 u8 xfer_len)
4757{
4758 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4759 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4760
4761 ioarcb->res_handle = res_handle;
4762 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4763 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4764 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
4765 ioarcb->cmd_pkt.cdb[1] = parm;
4766 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4767
4768 ioadl->flags_and_data_len =
4769 cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
4770 ioadl->address = cpu_to_be32(dma_addr);
4771 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4772 ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
4773}
4774
4775/**
4776 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
4777 * @ipr_cmd: ipr command struct
4778 *
4779 * This function sets up the SCSI bus attributes and sends
4780 * a Mode Select for Page 28 to activate them.
4781 *
4782 * Return value:
4783 * IPR_RC_JOB_RETURN
4784 **/
4785static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
4786{
4787 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4788 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
4789 int length;
4790
4791 ENTER;
4733804c
BK
4792 ipr_scsi_bus_speed_limit(ioa_cfg);
4793 ipr_check_term_power(ioa_cfg, mode_pages);
4794 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
4795 length = mode_pages->hdr.length + 1;
4796 mode_pages->hdr.length = 0;
1da177e4
LT
4797
4798 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
4799 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
4800 length);
4801
62275040 4802 ipr_cmd->job_step = ipr_setup_write_cache;
1da177e4
LT
4803 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4804
4805 LEAVE;
4806 return IPR_RC_JOB_RETURN;
4807}
4808
4809/**
4810 * ipr_build_mode_sense - Builds a mode sense command
4811 * @ipr_cmd: ipr command struct
4812 * @res: resource entry struct
4813 * @parm: Byte 2 of mode sense command
4814 * @dma_addr: DMA address of mode sense buffer
4815 * @xfer_len: Size of DMA buffer
4816 *
4817 * Return value:
4818 * none
4819 **/
4820static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
4821 __be32 res_handle,
4822 u8 parm, u32 dma_addr, u8 xfer_len)
4823{
4824 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4825 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4826
4827 ioarcb->res_handle = res_handle;
4828 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
4829 ioarcb->cmd_pkt.cdb[2] = parm;
4830 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4831 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4832
4833 ioadl->flags_and_data_len =
4834 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4835 ioadl->address = cpu_to_be32(dma_addr);
4836 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4837 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4838}
4839
dfed823e 4840/**
4841 * ipr_reset_cmd_failed - Handle failure of IOA reset command
4842 * @ipr_cmd: ipr command struct
4843 *
4844 * This function handles the failure of an IOA bringup command.
4845 *
4846 * Return value:
4847 * IPR_RC_JOB_RETURN
4848 **/
4849static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
4850{
4851 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4852 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4853
4854 dev_err(&ioa_cfg->pdev->dev,
4855 "0x%02X failed with IOASC: 0x%08X\n",
4856 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
4857
4858 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4859 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4860 return IPR_RC_JOB_RETURN;
4861}
4862
4863/**
4864 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
4865 * @ipr_cmd: ipr command struct
4866 *
4867 * This function handles the failure of a Mode Sense to the IOAFP.
4868 * Some adapters do not handle all mode pages.
4869 *
4870 * Return value:
4871 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4872 **/
4873static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
4874{
4875 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4876
4877 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
4878 ipr_cmd->job_step = ipr_setup_write_cache;
4879 return IPR_RC_JOB_CONTINUE;
4880 }
4881
4882 return ipr_reset_cmd_failed(ipr_cmd);
4883}
4884
1da177e4
LT
4885/**
4886 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
4887 * @ipr_cmd: ipr command struct
4888 *
4889 * This function send a Page 28 mode sense to the IOA to
4890 * retrieve SCSI bus attributes.
4891 *
4892 * Return value:
4893 * IPR_RC_JOB_RETURN
4894 **/
4895static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
4896{
4897 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4898
4899 ENTER;
4900 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
4901 0x28, ioa_cfg->vpd_cbs_dma +
4902 offsetof(struct ipr_misc_cbs, mode_pages),
4903 sizeof(struct ipr_mode_pages));
4904
4905 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
dfed823e 4906 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
1da177e4
LT
4907
4908 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4909
4910 LEAVE;
4911 return IPR_RC_JOB_RETURN;
4912}
4913
4914/**
4915 * ipr_init_res_table - Initialize the resource table
4916 * @ipr_cmd: ipr command struct
4917 *
4918 * This function looks through the existing resource table, comparing
4919 * it with the config table. This function will take care of old/new
4920 * devices and schedule adding/removing them from the mid-layer
4921 * as appropriate.
4922 *
4923 * Return value:
4924 * IPR_RC_JOB_CONTINUE
4925 **/
4926static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
4927{
4928 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4929 struct ipr_resource_entry *res, *temp;
4930 struct ipr_config_table_entry *cfgte;
4931 int found, i;
4932 LIST_HEAD(old_res);
4933
4934 ENTER;
4935 if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
4936 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
4937
4938 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
4939 list_move_tail(&res->queue, &old_res);
4940
4941 for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
4942 cfgte = &ioa_cfg->cfg_table->dev[i];
4943 found = 0;
4944
4945 list_for_each_entry_safe(res, temp, &old_res, queue) {
4946 if (!memcmp(&res->cfgte.res_addr,
4947 &cfgte->res_addr, sizeof(cfgte->res_addr))) {
4948 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4949 found = 1;
4950 break;
4951 }
4952 }
4953
4954 if (!found) {
4955 if (list_empty(&ioa_cfg->free_res_q)) {
4956 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
4957 break;
4958 }
4959
4960 found = 1;
4961 res = list_entry(ioa_cfg->free_res_q.next,
4962 struct ipr_resource_entry, queue);
4963 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4964 ipr_init_res_entry(res);
4965 res->add_to_ml = 1;
4966 }
4967
4968 if (found)
4969 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
4970 }
4971
4972 list_for_each_entry_safe(res, temp, &old_res, queue) {
4973 if (res->sdev) {
4974 res->del_from_ml = 1;
1121b794 4975 res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
1da177e4
LT
4976 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4977 } else {
4978 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
4979 }
4980 }
4981
4982 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
4983
4984 LEAVE;
4985 return IPR_RC_JOB_CONTINUE;
4986}
4987
4988/**
4989 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
4990 * @ipr_cmd: ipr command struct
4991 *
4992 * This function sends a Query IOA Configuration command
4993 * to the adapter to retrieve the IOA configuration table.
4994 *
4995 * Return value:
4996 * IPR_RC_JOB_RETURN
4997 **/
4998static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
4999{
5000 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5001 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5002 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5003 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
5004
5005 ENTER;
5006 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
5007 ucode_vpd->major_release, ucode_vpd->card_type,
5008 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
5009 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5010 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5011
5012 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
5013 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
5014 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
5015
5016 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5017 ioarcb->read_data_transfer_length =
5018 cpu_to_be32(sizeof(struct ipr_config_table));
5019
5020 ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
5021 ioadl->flags_and_data_len =
5022 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
5023
5024 ipr_cmd->job_step = ipr_init_res_table;
5025
5026 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5027
5028 LEAVE;
5029 return IPR_RC_JOB_RETURN;
5030}
5031
5032/**
5033 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
5034 * @ipr_cmd: ipr command struct
5035 *
5036 * This utility function sends an inquiry to the adapter.
5037 *
5038 * Return value:
5039 * none
5040 **/
5041static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
5042 u32 dma_addr, u8 xfer_len)
5043{
5044 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5045 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5046
5047 ENTER;
5048 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5049 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5050
5051 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
5052 ioarcb->cmd_pkt.cdb[1] = flags;
5053 ioarcb->cmd_pkt.cdb[2] = page;
5054 ioarcb->cmd_pkt.cdb[4] = xfer_len;
5055
5056 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5057 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5058
5059 ioadl->address = cpu_to_be32(dma_addr);
5060 ioadl->flags_and_data_len =
5061 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5062
5063 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5064 LEAVE;
5065}
5066
62275040 5067/**
5068 * ipr_inquiry_page_supported - Is the given inquiry page supported
5069 * @page0: inquiry page 0 buffer
5070 * @page: page code.
5071 *
5072 * This function determines if the specified inquiry page is supported.
5073 *
5074 * Return value:
5075 * 1 if page is supported / 0 if not
5076 **/
5077static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
5078{
5079 int i;
5080
5081 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
5082 if (page0->page[i] == page)
5083 return 1;
5084
5085 return 0;
5086}
5087
1da177e4
LT
5088/**
5089 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
5090 * @ipr_cmd: ipr command struct
5091 *
5092 * This function sends a Page 3 inquiry to the adapter
5093 * to retrieve software VPD information.
5094 *
5095 * Return value:
5096 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5097 **/
5098static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
62275040 5099{
5100 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5101 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
5102
5103 ENTER;
5104
5105 if (!ipr_inquiry_page_supported(page0, 1))
5106 ioa_cfg->cache_state = CACHE_NONE;
5107
5108 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
5109
5110 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
5111 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
5112 sizeof(struct ipr_inquiry_page3));
5113
5114 LEAVE;
5115 return IPR_RC_JOB_RETURN;
5116}
5117
5118/**
5119 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
5120 * @ipr_cmd: ipr command struct
5121 *
5122 * This function sends a Page 0 inquiry to the adapter
5123 * to retrieve supported inquiry pages.
5124 *
5125 * Return value:
5126 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5127 **/
5128static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
1da177e4
LT
5129{
5130 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5131 char type[5];
5132
5133 ENTER;
5134
5135 /* Grab the type out of the VPD and store it away */
5136 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
5137 type[4] = '\0';
5138 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
5139
62275040 5140 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
1da177e4 5141
62275040 5142 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
5143 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
5144 sizeof(struct ipr_inquiry_page0));
1da177e4
LT
5145
5146 LEAVE;
5147 return IPR_RC_JOB_RETURN;
5148}
5149
5150/**
5151 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
5152 * @ipr_cmd: ipr command struct
5153 *
5154 * This function sends a standard inquiry to the adapter.
5155 *
5156 * Return value:
5157 * IPR_RC_JOB_RETURN
5158 **/
5159static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
5160{
5161 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5162
5163 ENTER;
62275040 5164 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
1da177e4
LT
5165
5166 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
5167 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
5168 sizeof(struct ipr_ioa_vpd));
5169
5170 LEAVE;
5171 return IPR_RC_JOB_RETURN;
5172}
5173
5174/**
5175 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
5176 * @ipr_cmd: ipr command struct
5177 *
5178 * This function send an Identify Host Request Response Queue
5179 * command to establish the HRRQ with the adapter.
5180 *
5181 * Return value:
5182 * IPR_RC_JOB_RETURN
5183 **/
5184static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
5185{
5186 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5187 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5188
5189 ENTER;
5190 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
5191
5192 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
5193 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5194
5195 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5196 ioarcb->cmd_pkt.cdb[2] =
5197 ((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
5198 ioarcb->cmd_pkt.cdb[3] =
5199 ((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
5200 ioarcb->cmd_pkt.cdb[4] =
5201 ((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
5202 ioarcb->cmd_pkt.cdb[5] =
5203 ((u32) ioa_cfg->host_rrq_dma) & 0xff;
5204 ioarcb->cmd_pkt.cdb[7] =
5205 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
5206 ioarcb->cmd_pkt.cdb[8] =
5207 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
5208
5209 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
5210
5211 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5212
5213 LEAVE;
5214 return IPR_RC_JOB_RETURN;
5215}
5216
5217/**
5218 * ipr_reset_timer_done - Adapter reset timer function
5219 * @ipr_cmd: ipr command struct
5220 *
5221 * Description: This function is used in adapter reset processing
5222 * for timing events. If the reset_cmd pointer in the IOA
5223 * config struct is not this adapter's we are doing nested
5224 * resets and fail_all_ops will take care of freeing the
5225 * command block.
5226 *
5227 * Return value:
5228 * none
5229 **/
5230static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
5231{
5232 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5233 unsigned long lock_flags = 0;
5234
5235 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5236
5237 if (ioa_cfg->reset_cmd == ipr_cmd) {
5238 list_del(&ipr_cmd->queue);
5239 ipr_cmd->done(ipr_cmd);
5240 }
5241
5242 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5243}
5244
5245/**
5246 * ipr_reset_start_timer - Start a timer for adapter reset job
5247 * @ipr_cmd: ipr command struct
5248 * @timeout: timeout value
5249 *
5250 * Description: This function is used in adapter reset processing
5251 * for timing events. If the reset_cmd pointer in the IOA
5252 * config struct is not this adapter's we are doing nested
5253 * resets and fail_all_ops will take care of freeing the
5254 * command block.
5255 *
5256 * Return value:
5257 * none
5258 **/
5259static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
5260 unsigned long timeout)
5261{
5262 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
5263 ipr_cmd->done = ipr_reset_ioa_job;
5264
5265 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5266 ipr_cmd->timer.expires = jiffies + timeout;
5267 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
5268 add_timer(&ipr_cmd->timer);
5269}
5270
5271/**
5272 * ipr_init_ioa_mem - Initialize ioa_cfg control block
5273 * @ioa_cfg: ioa cfg struct
5274 *
5275 * Return value:
5276 * nothing
5277 **/
5278static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
5279{
5280 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
5281
5282 /* Initialize Host RRQ pointers */
5283 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
5284 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
5285 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
5286 ioa_cfg->toggle_bit = 1;
5287
5288 /* Zero out config table */
5289 memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
5290}
5291
5292/**
5293 * ipr_reset_enable_ioa - Enable the IOA following a reset.
5294 * @ipr_cmd: ipr command struct
5295 *
5296 * This function reinitializes some control blocks and
5297 * enables destructive diagnostics on the adapter.
5298 *
5299 * Return value:
5300 * IPR_RC_JOB_RETURN
5301 **/
5302static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
5303{
5304 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5305 volatile u32 int_reg;
5306
5307 ENTER;
5308 ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
5309 ipr_init_ioa_mem(ioa_cfg);
5310
5311 ioa_cfg->allow_interrupts = 1;
5312 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5313
5314 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5315 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
5316 ioa_cfg->regs.clr_interrupt_mask_reg);
5317 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5318 return IPR_RC_JOB_CONTINUE;
5319 }
5320
5321 /* Enable destructive diagnostics on IOA */
3d1d0da6 5322 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);
1da177e4
LT
5323
5324 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
5325 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5326
5327 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
5328
5329 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5330 ipr_cmd->timer.expires = jiffies + (ipr_transop_timeout * HZ);
5331 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
5332 ipr_cmd->done = ipr_reset_ioa_job;
5333 add_timer(&ipr_cmd->timer);
5334 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5335
5336 LEAVE;
5337 return IPR_RC_JOB_RETURN;
5338}
5339
5340/**
5341 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
5342 * @ipr_cmd: ipr command struct
5343 *
5344 * This function is invoked when an adapter dump has run out
5345 * of processing time.
5346 *
5347 * Return value:
5348 * IPR_RC_JOB_CONTINUE
5349 **/
5350static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
5351{
5352 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5353
5354 if (ioa_cfg->sdt_state == GET_DUMP)
5355 ioa_cfg->sdt_state = ABORT_DUMP;
5356
5357 ipr_cmd->job_step = ipr_reset_alert;
5358
5359 return IPR_RC_JOB_CONTINUE;
5360}
5361
5362/**
5363 * ipr_unit_check_no_data - Log a unit check/no data error log
5364 * @ioa_cfg: ioa config struct
5365 *
5366 * Logs an error indicating the adapter unit checked, but for some
5367 * reason, we were unable to fetch the unit check buffer.
5368 *
5369 * Return value:
5370 * nothing
5371 **/
5372static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
5373{
5374 ioa_cfg->errors_logged++;
5375 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
5376}
5377
5378/**
5379 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
5380 * @ioa_cfg: ioa config struct
5381 *
5382 * Fetches the unit check buffer from the adapter by clocking the data
5383 * through the mailbox register.
5384 *
5385 * Return value:
5386 * nothing
5387 **/
5388static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
5389{
5390 unsigned long mailbox;
5391 struct ipr_hostrcb *hostrcb;
5392 struct ipr_uc_sdt sdt;
5393 int rc, length;
5394
5395 mailbox = readl(ioa_cfg->ioa_mailbox);
5396
5397 if (!ipr_sdt_is_fmt2(mailbox)) {
5398 ipr_unit_check_no_data(ioa_cfg);
5399 return;
5400 }
5401
5402 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
5403 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
5404 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
5405
5406 if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
5407 !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
5408 ipr_unit_check_no_data(ioa_cfg);
5409 return;
5410 }
5411
5412 /* Find length of the first sdt entry (UC buffer) */
5413 length = (be32_to_cpu(sdt.entry[0].end_offset) -
5414 be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
5415
5416 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
5417 struct ipr_hostrcb, queue);
5418 list_del(&hostrcb->queue);
5419 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
5420
5421 rc = ipr_get_ldump_data_section(ioa_cfg,
5422 be32_to_cpu(sdt.entry[0].bar_str_offset),
5423 (__be32 *)&hostrcb->hcam,
5424 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
5425
5426 if (!rc)
5427 ipr_handle_log_data(ioa_cfg, hostrcb);
5428 else
5429 ipr_unit_check_no_data(ioa_cfg);
5430
5431 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
5432}
5433
5434/**
5435 * ipr_reset_restore_cfg_space - Restore PCI config space.
5436 * @ipr_cmd: ipr command struct
5437 *
5438 * Description: This function restores the saved PCI config space of
5439 * the adapter, fails all outstanding ops back to the callers, and
5440 * fetches the dump/unit check if applicable to this reset.
5441 *
5442 * Return value:
5443 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5444 **/
5445static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
5446{
5447 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5448 int rc;
5449
5450 ENTER;
b30197d2 5451 pci_unblock_user_cfg_access(ioa_cfg->pdev);
1da177e4
LT
5452 rc = pci_restore_state(ioa_cfg->pdev);
5453
5454 if (rc != PCIBIOS_SUCCESSFUL) {
5455 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5456 return IPR_RC_JOB_CONTINUE;
5457 }
5458
5459 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
5460 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5461 return IPR_RC_JOB_CONTINUE;
5462 }
5463
5464 ipr_fail_all_ops(ioa_cfg);
5465
5466 if (ioa_cfg->ioa_unit_checked) {
5467 ioa_cfg->ioa_unit_checked = 0;
5468 ipr_get_unit_check_buffer(ioa_cfg);
5469 ipr_cmd->job_step = ipr_reset_alert;
5470 ipr_reset_start_timer(ipr_cmd, 0);
5471 return IPR_RC_JOB_RETURN;
5472 }
5473
5474 if (ioa_cfg->in_ioa_bringdown) {
5475 ipr_cmd->job_step = ipr_ioa_bringdown_done;
5476 } else {
5477 ipr_cmd->job_step = ipr_reset_enable_ioa;
5478
5479 if (GET_DUMP == ioa_cfg->sdt_state) {
5480 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
5481 ipr_cmd->job_step = ipr_reset_wait_for_dump;
5482 schedule_work(&ioa_cfg->work_q);
5483 return IPR_RC_JOB_RETURN;
5484 }
5485 }
5486
5487 ENTER;
5488 return IPR_RC_JOB_CONTINUE;
5489}
5490
5491/**
5492 * ipr_reset_start_bist - Run BIST on the adapter.
5493 * @ipr_cmd: ipr command struct
5494 *
5495 * Description: This function runs BIST on the adapter, then delays 2 seconds.
5496 *
5497 * Return value:
5498 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5499 **/
5500static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
5501{
5502 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5503 int rc;
5504
5505 ENTER;
b30197d2 5506 pci_block_user_cfg_access(ioa_cfg->pdev);
1da177e4
LT
5507 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
5508
5509 if (rc != PCIBIOS_SUCCESSFUL) {
5510 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5511 rc = IPR_RC_JOB_CONTINUE;
5512 } else {
5513 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
5514 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
5515 rc = IPR_RC_JOB_RETURN;
5516 }
5517
5518 LEAVE;
5519 return rc;
5520}
5521
5522/**
5523 * ipr_reset_allowed - Query whether or not IOA can be reset
5524 * @ioa_cfg: ioa config struct
5525 *
5526 * Return value:
5527 * 0 if reset not allowed / non-zero if reset is allowed
5528 **/
5529static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
5530{
5531 volatile u32 temp_reg;
5532
5533 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5534 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
5535}
5536
5537/**
5538 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
5539 * @ipr_cmd: ipr command struct
5540 *
5541 * Description: This function waits for adapter permission to run BIST,
5542 * then runs BIST. If the adapter does not give permission after a
5543 * reasonable time, we will reset the adapter anyway. The impact of
5544 * resetting the adapter without warning the adapter is the risk of
5545 * losing the persistent error log on the adapter. If the adapter is
5546 * reset while it is writing to the flash on the adapter, the flash
5547 * segment will have bad ECC and be zeroed.
5548 *
5549 * Return value:
5550 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5551 **/
5552static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
5553{
5554 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5555 int rc = IPR_RC_JOB_RETURN;
5556
5557 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
5558 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
5559 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5560 } else {
5561 ipr_cmd->job_step = ipr_reset_start_bist;
5562 rc = IPR_RC_JOB_CONTINUE;
5563 }
5564
5565 return rc;
5566}
5567
5568/**
5569 * ipr_reset_alert_part2 - Alert the adapter of a pending reset
5570 * @ipr_cmd: ipr command struct
5571 *
5572 * Description: This function alerts the adapter that it will be reset.
5573 * If memory space is not currently enabled, proceed directly
5574 * to running BIST on the adapter. The timer must always be started
5575 * so we guarantee we do not run BIST from ipr_isr.
5576 *
5577 * Return value:
5578 * IPR_RC_JOB_RETURN
5579 **/
5580static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
5581{
5582 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5583 u16 cmd_reg;
5584 int rc;
5585
5586 ENTER;
5587 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
5588
5589 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
5590 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5591 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
5592 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
5593 } else {
5594 ipr_cmd->job_step = ipr_reset_start_bist;
5595 }
5596
5597 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
5598 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5599
5600 LEAVE;
5601 return IPR_RC_JOB_RETURN;
5602}
5603
5604/**
5605 * ipr_reset_ucode_download_done - Microcode download completion
5606 * @ipr_cmd: ipr command struct
5607 *
5608 * Description: This function unmaps the microcode download buffer.
5609 *
5610 * Return value:
5611 * IPR_RC_JOB_CONTINUE
5612 **/
5613static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
5614{
5615 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5616 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5617
5618 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
5619 sglist->num_sg, DMA_TO_DEVICE);
5620
5621 ipr_cmd->job_step = ipr_reset_alert;
5622 return IPR_RC_JOB_CONTINUE;
5623}
5624
5625/**
5626 * ipr_reset_ucode_download - Download microcode to the adapter
5627 * @ipr_cmd: ipr command struct
5628 *
5629 * Description: This function checks to see if it there is microcode
5630 * to download to the adapter. If there is, a download is performed.
5631 *
5632 * Return value:
5633 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5634 **/
5635static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
5636{
5637 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5638 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5639
5640 ENTER;
5641 ipr_cmd->job_step = ipr_reset_alert;
5642
5643 if (!sglist)
5644 return IPR_RC_JOB_CONTINUE;
5645
5646 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5647 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5648 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
5649 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
5650 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
5651 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
5652 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
5653
12baa420 5654 ipr_build_ucode_ioadl(ipr_cmd, sglist);
1da177e4
LT
5655 ipr_cmd->job_step = ipr_reset_ucode_download_done;
5656
5657 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5658 IPR_WRITE_BUFFER_TIMEOUT);
5659
5660 LEAVE;
5661 return IPR_RC_JOB_RETURN;
5662}
5663
5664/**
5665 * ipr_reset_shutdown_ioa - Shutdown the adapter
5666 * @ipr_cmd: ipr command struct
5667 *
5668 * Description: This function issues an adapter shutdown of the
5669 * specified type to the specified adapter as part of the
5670 * adapter reset job.
5671 *
5672 * Return value:
5673 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5674 **/
5675static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
5676{
5677 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5678 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
5679 unsigned long timeout;
5680 int rc = IPR_RC_JOB_CONTINUE;
5681
5682 ENTER;
5683 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
5684 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5685 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5686 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5687 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
5688
5689 if (shutdown_type == IPR_SHUTDOWN_ABBREV)
5690 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
5691 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
5692 timeout = IPR_INTERNAL_TIMEOUT;
5693 else
5694 timeout = IPR_SHUTDOWN_TIMEOUT;
5695
5696 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
5697
5698 rc = IPR_RC_JOB_RETURN;
5699 ipr_cmd->job_step = ipr_reset_ucode_download;
5700 } else
5701 ipr_cmd->job_step = ipr_reset_alert;
5702
5703 LEAVE;
5704 return rc;
5705}
5706
5707/**
5708 * ipr_reset_ioa_job - Adapter reset job
5709 * @ipr_cmd: ipr command struct
5710 *
5711 * Description: This function is the job router for the adapter reset job.
5712 *
5713 * Return value:
5714 * none
5715 **/
5716static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
5717{
5718 u32 rc, ioasc;
1da177e4
LT
5719 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5720
5721 do {
5722 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5723
5724 if (ioa_cfg->reset_cmd != ipr_cmd) {
5725 /*
5726 * We are doing nested adapter resets and this is
5727 * not the current reset job.
5728 */
5729 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5730 return;
5731 }
5732
5733 if (IPR_IOASC_SENSE_KEY(ioasc)) {
dfed823e 5734 rc = ipr_cmd->job_step_failed(ipr_cmd);
5735 if (rc == IPR_RC_JOB_RETURN)
5736 return;
1da177e4
LT
5737 }
5738
5739 ipr_reinit_ipr_cmnd(ipr_cmd);
dfed823e 5740 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
1da177e4
LT
5741 rc = ipr_cmd->job_step(ipr_cmd);
5742 } while(rc == IPR_RC_JOB_CONTINUE);
5743}
5744
5745/**
5746 * _ipr_initiate_ioa_reset - Initiate an adapter reset
5747 * @ioa_cfg: ioa config struct
5748 * @job_step: first job step of reset job
5749 * @shutdown_type: shutdown type
5750 *
5751 * Description: This function will initiate the reset of the given adapter
5752 * starting at the selected job step.
5753 * If the caller needs to wait on the completion of the reset,
5754 * the caller must sleep on the reset_wait_q.
5755 *
5756 * Return value:
5757 * none
5758 **/
5759static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5760 int (*job_step) (struct ipr_cmnd *),
5761 enum ipr_shutdown_type shutdown_type)
5762{
5763 struct ipr_cmnd *ipr_cmd;
5764
5765 ioa_cfg->in_reset_reload = 1;
5766 ioa_cfg->allow_cmds = 0;
5767 scsi_block_requests(ioa_cfg->host);
5768
5769 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5770 ioa_cfg->reset_cmd = ipr_cmd;
5771 ipr_cmd->job_step = job_step;
5772 ipr_cmd->u.shutdown_type = shutdown_type;
5773
5774 ipr_reset_ioa_job(ipr_cmd);
5775}
5776
5777/**
5778 * ipr_initiate_ioa_reset - Initiate an adapter reset
5779 * @ioa_cfg: ioa config struct
5780 * @shutdown_type: shutdown type
5781 *
5782 * Description: This function will initiate the reset of the given adapter.
5783 * If the caller needs to wait on the completion of the reset,
5784 * the caller must sleep on the reset_wait_q.
5785 *
5786 * Return value:
5787 * none
5788 **/
5789static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5790 enum ipr_shutdown_type shutdown_type)
5791{
5792 if (ioa_cfg->ioa_is_dead)
5793 return;
5794
5795 if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
5796 ioa_cfg->sdt_state = ABORT_DUMP;
5797
5798 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
5799 dev_err(&ioa_cfg->pdev->dev,
5800 "IOA taken offline - error recovery failed\n");
5801
5802 ioa_cfg->reset_retries = 0;
5803 ioa_cfg->ioa_is_dead = 1;
5804
5805 if (ioa_cfg->in_ioa_bringdown) {
5806 ioa_cfg->reset_cmd = NULL;
5807 ioa_cfg->in_reset_reload = 0;
5808 ipr_fail_all_ops(ioa_cfg);
5809 wake_up_all(&ioa_cfg->reset_wait_q);
5810
5811 spin_unlock_irq(ioa_cfg->host->host_lock);
5812 scsi_unblock_requests(ioa_cfg->host);
5813 spin_lock_irq(ioa_cfg->host->host_lock);
5814 return;
5815 } else {
5816 ioa_cfg->in_ioa_bringdown = 1;
5817 shutdown_type = IPR_SHUTDOWN_NONE;
5818 }
5819 }
5820
5821 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
5822 shutdown_type);
5823}
5824
f8a88b19
LV
5825/**
5826 * ipr_reset_freeze - Hold off all I/O activity
5827 * @ipr_cmd: ipr command struct
5828 *
5829 * Description: If the PCI slot is frozen, hold off all I/O
5830 * activity; then, as soon as the slot is available again,
5831 * initiate an adapter reset.
5832 */
5833static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
5834{
5835 /* Disallow new interrupts, avoid loop */
5836 ipr_cmd->ioa_cfg->allow_interrupts = 0;
5837 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
5838 ipr_cmd->done = ipr_reset_ioa_job;
5839 return IPR_RC_JOB_RETURN;
5840}
5841
5842/**
5843 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
5844 * @pdev: PCI device struct
5845 *
5846 * Description: This routine is called to tell us that the PCI bus
5847 * is down. Can't do anything here, except put the device driver
5848 * into a holding pattern, waiting for the PCI bus to come back.
5849 */
5850static void ipr_pci_frozen(struct pci_dev *pdev)
5851{
5852 unsigned long flags = 0;
5853 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
5854
5855 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5856 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
5857 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5858}
5859
5860/**
5861 * ipr_pci_slot_reset - Called when PCI slot has been reset.
5862 * @pdev: PCI device struct
5863 *
5864 * Description: This routine is called by the pci error recovery
5865 * code after the PCI slot has been reset, just before we
5866 * should resume normal operations.
5867 */
5868static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
5869{
5870 unsigned long flags = 0;
5871 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
5872
5873 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5874 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
5875 IPR_SHUTDOWN_NONE);
5876 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5877 return PCI_ERS_RESULT_RECOVERED;
5878}
5879
5880/**
5881 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
5882 * @pdev: PCI device struct
5883 *
5884 * Description: This routine is called when the PCI bus has
5885 * permanently failed.
5886 */
5887static void ipr_pci_perm_failure(struct pci_dev *pdev)
5888{
5889 unsigned long flags = 0;
5890 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
5891
5892 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5893 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
5894 ioa_cfg->sdt_state = ABORT_DUMP;
5895 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
5896 ioa_cfg->in_ioa_bringdown = 1;
5897 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5898 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5899}
5900
5901/**
5902 * ipr_pci_error_detected - Called when a PCI error is detected.
5903 * @pdev: PCI device struct
5904 * @state: PCI channel state
5905 *
5906 * Description: Called when a PCI error is detected.
5907 *
5908 * Return value:
5909 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
5910 */
5911static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
5912 pci_channel_state_t state)
5913{
5914 switch (state) {
5915 case pci_channel_io_frozen:
5916 ipr_pci_frozen(pdev);
5917 return PCI_ERS_RESULT_NEED_RESET;
5918 case pci_channel_io_perm_failure:
5919 ipr_pci_perm_failure(pdev);
5920 return PCI_ERS_RESULT_DISCONNECT;
5921 break;
5922 default:
5923 break;
5924 }
5925 return PCI_ERS_RESULT_NEED_RESET;
5926}
5927
1da177e4
LT
5928/**
5929 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
5930 * @ioa_cfg: ioa cfg struct
5931 *
5932 * Description: This is the second phase of adapter intialization
5933 * This function takes care of initilizing the adapter to the point
5934 * where it can accept new commands.
5935
5936 * Return value:
5937 * 0 on sucess / -EIO on failure
5938 **/
5939static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
5940{
5941 int rc = 0;
5942 unsigned long host_lock_flags = 0;
5943
5944 ENTER;
5945 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5946 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
ce155cce 5947 if (ioa_cfg->needs_hard_reset) {
5948 ioa_cfg->needs_hard_reset = 0;
5949 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5950 } else
5951 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
5952 IPR_SHUTDOWN_NONE);
1da177e4
LT
5953
5954 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5955 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5956 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5957
5958 if (ioa_cfg->ioa_is_dead) {
5959 rc = -EIO;
5960 } else if (ipr_invalid_adapter(ioa_cfg)) {
5961 if (!ipr_testmode)
5962 rc = -EIO;
5963
5964 dev_err(&ioa_cfg->pdev->dev,
5965 "Adapter not supported in this hardware configuration.\n");
5966 }
5967
5968 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5969
5970 LEAVE;
5971 return rc;
5972}
5973
5974/**
5975 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
5976 * @ioa_cfg: ioa config struct
5977 *
5978 * Return value:
5979 * none
5980 **/
5981static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
5982{
5983 int i;
5984
5985 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
5986 if (ioa_cfg->ipr_cmnd_list[i])
5987 pci_pool_free(ioa_cfg->ipr_cmd_pool,
5988 ioa_cfg->ipr_cmnd_list[i],
5989 ioa_cfg->ipr_cmnd_list_dma[i]);
5990
5991 ioa_cfg->ipr_cmnd_list[i] = NULL;
5992 }
5993
5994 if (ioa_cfg->ipr_cmd_pool)
5995 pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
5996
5997 ioa_cfg->ipr_cmd_pool = NULL;
5998}
5999
6000/**
6001 * ipr_free_mem - Frees memory allocated for an adapter
6002 * @ioa_cfg: ioa cfg struct
6003 *
6004 * Return value:
6005 * nothing
6006 **/
6007static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
6008{
6009 int i;
6010
6011 kfree(ioa_cfg->res_entries);
6012 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
6013 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
6014 ipr_free_cmd_blks(ioa_cfg);
6015 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
6016 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
6017 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
6018 ioa_cfg->cfg_table,
6019 ioa_cfg->cfg_table_dma);
6020
6021 for (i = 0; i < IPR_NUM_HCAMS; i++) {
6022 pci_free_consistent(ioa_cfg->pdev,
6023 sizeof(struct ipr_hostrcb),
6024 ioa_cfg->hostrcb[i],
6025 ioa_cfg->hostrcb_dma[i]);
6026 }
6027
6028 ipr_free_dump(ioa_cfg);
1da177e4
LT
6029 kfree(ioa_cfg->trace);
6030}
6031
6032/**
6033 * ipr_free_all_resources - Free all allocated resources for an adapter.
6034 * @ipr_cmd: ipr command struct
6035 *
6036 * This function frees all allocated resources for the
6037 * specified adapter.
6038 *
6039 * Return value:
6040 * none
6041 **/
6042static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
6043{
6044 struct pci_dev *pdev = ioa_cfg->pdev;
6045
6046 ENTER;
6047 free_irq(pdev->irq, ioa_cfg);
6048 iounmap(ioa_cfg->hdw_dma_regs);
6049 pci_release_regions(pdev);
6050 ipr_free_mem(ioa_cfg);
6051 scsi_host_put(ioa_cfg->host);
6052 pci_disable_device(pdev);
6053 LEAVE;
6054}
6055
6056/**
6057 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
6058 * @ioa_cfg: ioa config struct
6059 *
6060 * Return value:
6061 * 0 on success / -ENOMEM on allocation failure
6062 **/
6063static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
6064{
6065 struct ipr_cmnd *ipr_cmd;
6066 struct ipr_ioarcb *ioarcb;
6067 dma_addr_t dma_addr;
6068 int i;
6069
6070 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
6071 sizeof(struct ipr_cmnd), 8, 0);
6072
6073 if (!ioa_cfg->ipr_cmd_pool)
6074 return -ENOMEM;
6075
6076 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
6077 ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);
6078
6079 if (!ipr_cmd) {
6080 ipr_free_cmd_blks(ioa_cfg);
6081 return -ENOMEM;
6082 }
6083
6084 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
6085 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
6086 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
6087
6088 ioarcb = &ipr_cmd->ioarcb;
6089 ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
6090 ioarcb->host_response_handle = cpu_to_be32(i << 2);
6091 ioarcb->write_ioadl_addr =
6092 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
6093 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6094 ioarcb->ioasa_host_pci_addr =
6095 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
6096 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
6097 ipr_cmd->cmd_index = i;
6098 ipr_cmd->ioa_cfg = ioa_cfg;
6099 ipr_cmd->sense_buffer_dma = dma_addr +
6100 offsetof(struct ipr_cmnd, sense_buffer);
6101
6102 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6103 }
6104
6105 return 0;
6106}
6107
6108/**
6109 * ipr_alloc_mem - Allocate memory for an adapter
6110 * @ioa_cfg: ioa config struct
6111 *
6112 * Return value:
6113 * 0 on success / non-zero for error
6114 **/
6115static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
6116{
6117 struct pci_dev *pdev = ioa_cfg->pdev;
6118 int i, rc = -ENOMEM;
6119
6120 ENTER;
0bc42e35 6121 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
1da177e4
LT
6122 IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
6123
6124 if (!ioa_cfg->res_entries)
6125 goto out;
6126
1da177e4
LT
6127 for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
6128 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
6129
6130 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
6131 sizeof(struct ipr_misc_cbs),
6132 &ioa_cfg->vpd_cbs_dma);
6133
6134 if (!ioa_cfg->vpd_cbs)
6135 goto out_free_res_entries;
6136
6137 if (ipr_alloc_cmd_blks(ioa_cfg))
6138 goto out_free_vpd_cbs;
6139
6140 ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
6141 sizeof(u32) * IPR_NUM_CMD_BLKS,
6142 &ioa_cfg->host_rrq_dma);
6143
6144 if (!ioa_cfg->host_rrq)
6145 goto out_ipr_free_cmd_blocks;
6146
6147 ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
6148 sizeof(struct ipr_config_table),
6149 &ioa_cfg->cfg_table_dma);
6150
6151 if (!ioa_cfg->cfg_table)
6152 goto out_free_host_rrq;
6153
6154 for (i = 0; i < IPR_NUM_HCAMS; i++) {
6155 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
6156 sizeof(struct ipr_hostrcb),
6157 &ioa_cfg->hostrcb_dma[i]);
6158
6159 if (!ioa_cfg->hostrcb[i])
6160 goto out_free_hostrcb_dma;
6161
6162 ioa_cfg->hostrcb[i]->hostrcb_dma =
6163 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
6164 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
6165 }
6166
0bc42e35 6167 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
1da177e4
LT
6168 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
6169
6170 if (!ioa_cfg->trace)
6171 goto out_free_hostrcb_dma;
6172
1da177e4
LT
6173 rc = 0;
6174out:
6175 LEAVE;
6176 return rc;
6177
6178out_free_hostrcb_dma:
6179 while (i-- > 0) {
6180 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
6181 ioa_cfg->hostrcb[i],
6182 ioa_cfg->hostrcb_dma[i]);
6183 }
6184 pci_free_consistent(pdev, sizeof(struct ipr_config_table),
6185 ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
6186out_free_host_rrq:
6187 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
6188 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
6189out_ipr_free_cmd_blocks:
6190 ipr_free_cmd_blks(ioa_cfg);
6191out_free_vpd_cbs:
6192 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
6193 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
6194out_free_res_entries:
6195 kfree(ioa_cfg->res_entries);
6196 goto out;
6197}
6198
6199/**
6200 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
6201 * @ioa_cfg: ioa config struct
6202 *
6203 * Return value:
6204 * none
6205 **/
6206static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
6207{
6208 int i;
6209
6210 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6211 ioa_cfg->bus_attr[i].bus = i;
6212 ioa_cfg->bus_attr[i].qas_enabled = 0;
6213 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
6214 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
6215 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
6216 else
6217 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
6218 }
6219}
6220
6221/**
6222 * ipr_init_ioa_cfg - Initialize IOA config struct
6223 * @ioa_cfg: ioa config struct
6224 * @host: scsi host struct
6225 * @pdev: PCI dev struct
6226 *
6227 * Return value:
6228 * none
6229 **/
6230static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
6231 struct Scsi_Host *host, struct pci_dev *pdev)
6232{
6233 const struct ipr_interrupt_offsets *p;
6234 struct ipr_interrupts *t;
6235 void __iomem *base;
6236
6237 ioa_cfg->host = host;
6238 ioa_cfg->pdev = pdev;
6239 ioa_cfg->log_level = ipr_log_level;
3d1d0da6 6240 ioa_cfg->doorbell = IPR_DOORBELL;
32d29776 6241 if (!ipr_auto_create)
6242 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
1da177e4
LT
6243 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
6244 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
6245 sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
6246 sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
6247 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
6248 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
6249 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
6250 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
6251
6252 INIT_LIST_HEAD(&ioa_cfg->free_q);
6253 INIT_LIST_HEAD(&ioa_cfg->pending_q);
6254 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
6255 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
6256 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
6257 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
6258 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
6259 init_waitqueue_head(&ioa_cfg->reset_wait_q);
6260 ioa_cfg->sdt_state = INACTIVE;
62275040 6261 if (ipr_enable_cache)
6262 ioa_cfg->cache_state = CACHE_ENABLED;
6263 else
6264 ioa_cfg->cache_state = CACHE_DISABLED;
1da177e4
LT
6265
6266 ipr_initialize_bus_attr(ioa_cfg);
6267
6268 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
6269 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
6270 host->max_channel = IPR_MAX_BUS_TO_SCAN;
6271 host->unique_id = host->host_no;
6272 host->max_cmd_len = IPR_MAX_CDB_LEN;
6273 pci_set_drvdata(pdev, ioa_cfg);
6274
6275 p = &ioa_cfg->chip_cfg->regs;
6276 t = &ioa_cfg->regs;
6277 base = ioa_cfg->hdw_dma_regs;
6278
6279 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
6280 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
6281 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
6282 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
6283 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
6284 t->ioarrin_reg = base + p->ioarrin_reg;
6285 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
6286 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
6287 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
6288}
6289
6290/**
6291 * ipr_get_chip_cfg - Find adapter chip configuration
6292 * @dev_id: PCI device id struct
6293 *
6294 * Return value:
6295 * ptr to chip config on success / NULL on failure
6296 **/
6297static const struct ipr_chip_cfg_t * __devinit
6298ipr_get_chip_cfg(const struct pci_device_id *dev_id)
6299{
6300 int i;
6301
6302 if (dev_id->driver_data)
6303 return (const struct ipr_chip_cfg_t *)dev_id->driver_data;
6304
6305 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
6306 if (ipr_chip[i].vendor == dev_id->vendor &&
6307 ipr_chip[i].device == dev_id->device)
6308 return ipr_chip[i].cfg;
6309 return NULL;
6310}
6311
6312/**
6313 * ipr_probe_ioa - Allocates memory and does first stage of initialization
6314 * @pdev: PCI device struct
6315 * @dev_id: PCI device id struct
6316 *
6317 * Return value:
6318 * 0 on success / non-zero on failure
6319 **/
6320static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
6321 const struct pci_device_id *dev_id)
6322{
6323 struct ipr_ioa_cfg *ioa_cfg;
6324 struct Scsi_Host *host;
6325 unsigned long ipr_regs_pci;
6326 void __iomem *ipr_regs;
6327 u32 rc = PCIBIOS_SUCCESSFUL;
ce155cce 6328 volatile u32 mask, uproc;
1da177e4
LT
6329
6330 ENTER;
6331
6332 if ((rc = pci_enable_device(pdev))) {
6333 dev_err(&pdev->dev, "Cannot enable adapter\n");
6334 goto out;
6335 }
6336
6337 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
6338
6339 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
6340
6341 if (!host) {
6342 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
6343 rc = -ENOMEM;
6344 goto out_disable;
6345 }
6346
6347 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
6348 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
6349
6350 ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
6351
6352 if (!ioa_cfg->chip_cfg) {
6353 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
6354 dev_id->vendor, dev_id->device);
6355 goto out_scsi_host_put;
6356 }
6357
6358 ipr_regs_pci = pci_resource_start(pdev, 0);
6359
6360 rc = pci_request_regions(pdev, IPR_NAME);
6361 if (rc < 0) {
6362 dev_err(&pdev->dev,
6363 "Couldn't register memory range of registers\n");
6364 goto out_scsi_host_put;
6365 }
6366
6367 ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));
6368
6369 if (!ipr_regs) {
6370 dev_err(&pdev->dev,
6371 "Couldn't map memory range of registers\n");
6372 rc = -ENOMEM;
6373 goto out_release_regions;
6374 }
6375
6376 ioa_cfg->hdw_dma_regs = ipr_regs;
6377 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
6378 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
6379
6380 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
6381
6382 pci_set_master(pdev);
6383
6384 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
6385 if (rc < 0) {
6386 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
6387 goto cleanup_nomem;
6388 }
6389
6390 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
6391 ioa_cfg->chip_cfg->cache_line_size);
6392
6393 if (rc != PCIBIOS_SUCCESSFUL) {
6394 dev_err(&pdev->dev, "Write of cache line size failed\n");
6395 rc = -EIO;
6396 goto cleanup_nomem;
6397 }
6398
6399 /* Save away PCI config space for use following IOA reset */
6400 rc = pci_save_state(pdev);
6401
6402 if (rc != PCIBIOS_SUCCESSFUL) {
6403 dev_err(&pdev->dev, "Failed to save PCI config space\n");
6404 rc = -EIO;
6405 goto cleanup_nomem;
6406 }
6407
6408 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
6409 goto cleanup_nomem;
6410
6411 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
6412 goto cleanup_nomem;
6413
6414 rc = ipr_alloc_mem(ioa_cfg);
6415 if (rc < 0) {
6416 dev_err(&pdev->dev,
6417 "Couldn't allocate enough memory for device driver!\n");
6418 goto cleanup_nomem;
6419 }
6420
ce155cce 6421 /*
6422 * If HRRQ updated interrupt is not masked, or reset alert is set,
6423 * the card is in an unknown state and needs a hard reset
6424 */
6425 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6426 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
6427 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
6428 ioa_cfg->needs_hard_reset = 1;
6429
1da177e4 6430 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
1d6f359a 6431 rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, ioa_cfg);
1da177e4
LT
6432
6433 if (rc) {
6434 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
6435 pdev->irq, rc);
6436 goto cleanup_nolog;
6437 }
6438
6439 spin_lock(&ipr_driver_lock);
6440 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
6441 spin_unlock(&ipr_driver_lock);
6442
6443 LEAVE;
6444out:
6445 return rc;
6446
6447cleanup_nolog:
6448 ipr_free_mem(ioa_cfg);
6449cleanup_nomem:
6450 iounmap(ipr_regs);
6451out_release_regions:
6452 pci_release_regions(pdev);
6453out_scsi_host_put:
6454 scsi_host_put(host);
6455out_disable:
6456 pci_disable_device(pdev);
6457 goto out;
6458}
6459
6460/**
6461 * ipr_scan_vsets - Scans for VSET devices
6462 * @ioa_cfg: ioa config struct
6463 *
6464 * Description: Since the VSET resources do not follow SAM in that we can have
6465 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
6466 *
6467 * Return value:
6468 * none
6469 **/
6470static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
6471{
6472 int target, lun;
6473
6474 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
6475 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
6476 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
6477}
6478
6479/**
6480 * ipr_initiate_ioa_bringdown - Bring down an adapter
6481 * @ioa_cfg: ioa config struct
6482 * @shutdown_type: shutdown type
6483 *
6484 * Description: This function will initiate bringing down the adapter.
6485 * This consists of issuing an IOA shutdown to the adapter
6486 * to flush the cache, and running BIST.
6487 * If the caller needs to wait on the completion of the reset,
6488 * the caller must sleep on the reset_wait_q.
6489 *
6490 * Return value:
6491 * none
6492 **/
6493static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
6494 enum ipr_shutdown_type shutdown_type)
6495{
6496 ENTER;
6497 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
6498 ioa_cfg->sdt_state = ABORT_DUMP;
6499 ioa_cfg->reset_retries = 0;
6500 ioa_cfg->in_ioa_bringdown = 1;
6501 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
6502 LEAVE;
6503}
6504
6505/**
6506 * __ipr_remove - Remove a single adapter
6507 * @pdev: pci device struct
6508 *
6509 * Adapter hot plug remove entry point.
6510 *
6511 * Return value:
6512 * none
6513 **/
6514static void __ipr_remove(struct pci_dev *pdev)
6515{
6516 unsigned long host_lock_flags = 0;
6517 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6518 ENTER;
6519
6520 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6521 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
6522
6523 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6524 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5cbf5eae 6525 flush_scheduled_work();
1da177e4
LT
6526 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6527
6528 spin_lock(&ipr_driver_lock);
6529 list_del(&ioa_cfg->queue);
6530 spin_unlock(&ipr_driver_lock);
6531
6532 if (ioa_cfg->sdt_state == ABORT_DUMP)
6533 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
6534 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6535
6536 ipr_free_all_resources(ioa_cfg);
6537
6538 LEAVE;
6539}
6540
6541/**
6542 * ipr_remove - IOA hot plug remove entry point
6543 * @pdev: pci device struct
6544 *
6545 * Adapter hot plug remove entry point.
6546 *
6547 * Return value:
6548 * none
6549 **/
6550static void ipr_remove(struct pci_dev *pdev)
6551{
6552 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6553
6554 ENTER;
6555
1da177e4
LT
6556 ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6557 &ipr_trace_attr);
6558 ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
6559 &ipr_dump_attr);
6560 scsi_remove_host(ioa_cfg->host);
6561
6562 __ipr_remove(pdev);
6563
6564 LEAVE;
6565}
6566
6567/**
6568 * ipr_probe - Adapter hot plug add entry point
6569 *
6570 * Return value:
6571 * 0 on success / non-zero on failure
6572 **/
6573static int __devinit ipr_probe(struct pci_dev *pdev,
6574 const struct pci_device_id *dev_id)
6575{
6576 struct ipr_ioa_cfg *ioa_cfg;
6577 int rc;
6578
6579 rc = ipr_probe_ioa(pdev, dev_id);
6580
6581 if (rc)
6582 return rc;
6583
6584 ioa_cfg = pci_get_drvdata(pdev);
6585 rc = ipr_probe_ioa_part2(ioa_cfg);
6586
6587 if (rc) {
6588 __ipr_remove(pdev);
6589 return rc;
6590 }
6591
6592 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
6593
6594 if (rc) {
6595 __ipr_remove(pdev);
6596 return rc;
6597 }
6598
6599 rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6600 &ipr_trace_attr);
6601
6602 if (rc) {
6603 scsi_remove_host(ioa_cfg->host);
6604 __ipr_remove(pdev);
6605 return rc;
6606 }
6607
6608 rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
6609 &ipr_dump_attr);
6610
6611 if (rc) {
6612 ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6613 &ipr_trace_attr);
6614 scsi_remove_host(ioa_cfg->host);
6615 __ipr_remove(pdev);
6616 return rc;
6617 }
6618
6619 scsi_scan_host(ioa_cfg->host);
6620 ipr_scan_vsets(ioa_cfg);
6621 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
6622 ioa_cfg->allow_ml_add_del = 1;
11cd8f12 6623 ioa_cfg->host->max_channel = IPR_VSET_BUS;
1da177e4
LT
6624 schedule_work(&ioa_cfg->work_q);
6625 return 0;
6626}
6627
6628/**
6629 * ipr_shutdown - Shutdown handler.
d18c3db5 6630 * @pdev: pci device struct
1da177e4
LT
6631 *
6632 * This function is invoked upon system shutdown/reboot. It will issue
6633 * an adapter shutdown to the adapter to flush the write cache.
6634 *
6635 * Return value:
6636 * none
6637 **/
d18c3db5 6638static void ipr_shutdown(struct pci_dev *pdev)
1da177e4 6639{
d18c3db5 6640 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
1da177e4
LT
6641 unsigned long lock_flags = 0;
6642
6643 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6644 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
6645 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6646 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6647}
6648
6649static struct pci_device_id ipr_pci_table[] __devinitdata = {
6650 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6651 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702,
6652 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6653 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6654 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703,
6655 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6656 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6657 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D,
6658 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6659 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6660 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E,
6661 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6662 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6663 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B,
6664 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6665 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6666 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
6667 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6668 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6669 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A,
6670 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
86f51436 6671 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6672 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B,
6673 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6674 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
6675 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
6676 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6677 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
6678 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
6679 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6680 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
6681 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
6682 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6683 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
6684 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
6685 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
1da177e4
LT
6686 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
6687 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
6688 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6689 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
6690 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E,
6691 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
86f51436 6692 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
6693 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F,
6694 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
1da177e4
LT
6695 { }
6696};
6697MODULE_DEVICE_TABLE(pci, ipr_pci_table);
6698
f8a88b19
LV
6699static struct pci_error_handlers ipr_err_handler = {
6700 .error_detected = ipr_pci_error_detected,
6701 .slot_reset = ipr_pci_slot_reset,
6702};
6703
1da177e4
LT
6704static struct pci_driver ipr_driver = {
6705 .name = IPR_NAME,
6706 .id_table = ipr_pci_table,
6707 .probe = ipr_probe,
6708 .remove = ipr_remove,
d18c3db5 6709 .shutdown = ipr_shutdown,
f8a88b19 6710 .err_handler = &ipr_err_handler,
1da177e4
LT
6711};
6712
6713/**
6714 * ipr_init - Module entry point
6715 *
6716 * Return value:
6717 * 0 on success / negative value on failure
6718 **/
6719static int __init ipr_init(void)
6720{
6721 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
6722 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
6723
6724 return pci_module_init(&ipr_driver);
6725}
6726
6727/**
6728 * ipr_exit - Module unload
6729 *
6730 * Module unload entry point.
6731 *
6732 * Return value:
6733 * none
6734 **/
6735static void __exit ipr_exit(void)
6736{
6737 pci_unregister_driver(&ipr_driver);
6738}
6739
6740module_init(ipr_init);
6741module_exit(ipr_exit);
This page took 0.42333 seconds and 5 git commands to generate.