1/*
2 * ipr.c -- driver for IBM Power Linux RAID adapters
3 *
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5 *
6 * Copyright (C) 2003, 2004 IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24/*
25 * Notes:
26 *
27 * This driver is used to control the following SCSI adapters:
28 *
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30 *
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
35 *
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
43 * - Hot spare
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
47 * by adding disks
48 *
49 * Driver Features:
50 * - Tagged command queuing
51 * - Adapter microcode download
52 * - PCI hot plug
53 * - SCSI device hot plug
54 *
55 */
56
57#include <linux/fs.h>
58#include <linux/init.h>
59#include <linux/types.h>
60#include <linux/errno.h>
61#include <linux/kernel.h>
62#include <linux/ioport.h>
63#include <linux/delay.h>
64#include <linux/pci.h>
65#include <linux/wait.h>
66#include <linux/spinlock.h>
67#include <linux/sched.h>
68#include <linux/interrupt.h>
69#include <linux/blkdev.h>
70#include <linux/firmware.h>
71#include <linux/module.h>
72#include <linux/moduleparam.h>
35a39691 73#include <linux/libata.h>
0ce3a7e5 74#include <linux/hdreg.h>
75#include <asm/io.h>
76#include <asm/irq.h>
77#include <asm/processor.h>
78#include <scsi/scsi.h>
79#include <scsi/scsi_host.h>
80#include <scsi/scsi_tcq.h>
81#include <scsi/scsi_eh.h>
82#include <scsi/scsi_cmnd.h>
83#include "ipr.h"
84
85/*
86 * Global Data
87 */
b7d68ca3 88static LIST_HEAD(ipr_ioa_head);
89static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
90static unsigned int ipr_max_speed = 1;
91static int ipr_testmode = 0;
92static unsigned int ipr_fastfail = 0;
5469cb5b 93static unsigned int ipr_transop_timeout = 0;
62275040 94static unsigned int ipr_enable_cache = 1;
d3c74871 95static unsigned int ipr_debug = 0;
ac09c349 96static unsigned int ipr_dual_ioa_raid = 1;
97static DEFINE_SPINLOCK(ipr_driver_lock);
98
99/* This table describes the differences between DMA controller chips */
100static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
60e7486b 101 { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
102 .mailbox = 0x0042C,
103 .cache_line_size = 0x20,
104 {
105 .set_interrupt_mask_reg = 0x0022C,
106 .clr_interrupt_mask_reg = 0x00230,
107 .sense_interrupt_mask_reg = 0x0022C,
108 .clr_interrupt_reg = 0x00228,
109 .sense_interrupt_reg = 0x00224,
110 .ioarrin_reg = 0x00404,
111 .sense_uproc_interrupt_reg = 0x00214,
112 .set_uproc_interrupt_reg = 0x00214,
113 .clr_uproc_interrupt_reg = 0x00218
114 }
115 },
116 { /* Snipe and Scamp */
117 .mailbox = 0x0052C,
118 .cache_line_size = 0x20,
119 {
120 .set_interrupt_mask_reg = 0x00288,
121 .clr_interrupt_mask_reg = 0x0028C,
122 .sense_interrupt_mask_reg = 0x00288,
123 .clr_interrupt_reg = 0x00284,
124 .sense_interrupt_reg = 0x00280,
125 .ioarrin_reg = 0x00504,
126 .sense_uproc_interrupt_reg = 0x00290,
127 .set_uproc_interrupt_reg = 0x00290,
128 .clr_uproc_interrupt_reg = 0x00294
129 }
130 },
131};
132
133static const struct ipr_chip_t ipr_chip[] = {
134 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
135 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
86f51436 136 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
137 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
60e7486b 138 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, &ipr_chip_cfg[0] },
139 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
140 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
141};
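/*
 * Note (added for clarity): at probe time the adapter's PCI vendor/device IDs
 * are looked up in ipr_chip[] above to select the register layout (the
 * ipr_chip_cfg entry) used for that chip.
 */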
142
143static int ipr_max_bus_speeds [] = {
144 IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
145};
146
147MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
148MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
149module_param_named(max_speed, ipr_max_speed, uint, 0);
150MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
151module_param_named(log_level, ipr_log_level, uint, 0);
152MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
153module_param_named(testmode, ipr_testmode, int, 0);
154MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
155module_param_named(fastfail, ipr_fastfail, int, 0);
156MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
157module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
158MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
62275040 159module_param_named(enable_cache, ipr_enable_cache, int, 0);
160MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
d3c74871 161module_param_named(debug, ipr_debug, int, 0);
162MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
163module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
164MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
165MODULE_LICENSE("GPL");
166MODULE_VERSION(IPR_DRIVER_VERSION);
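/*
 * Illustrative usage (not part of the original source): the module parameters
 * declared above can be set at load time, for example:
 *
 *	modprobe ipr max_speed=2 log_level=4 fastfail=1
 *
 * Values shown are examples only; see the MODULE_PARM_DESC strings for the
 * meaning and valid range of each parameter.
 */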
167
168/* A constant array of IOASCs/URCs/Error Messages */
169static const
170struct ipr_error_table_t ipr_error_table[] = {
933916f3 171 {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
172 "8155: An unknown error was received"},
173 {0x00330000, 0, 0,
174 "Soft underlength error"},
175 {0x005A0000, 0, 0,
176 "Command to be cancelled not found"},
177 {0x00808000, 0, 0,
178 "Qualified success"},
933916f3 179 {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 180 "FFFE: Soft device bus error recovered by the IOA"},
933916f3 181 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 182 "4101: Soft device bus fabric error"},
933916f3 183 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 184 "FFF9: Device sector reassign successful"},
933916f3 185 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 186 "FFF7: Media error recovered by device rewrite procedures"},
933916f3 187 {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 188 "7001: IOA sector reassignment successful"},
933916f3 189 {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 190 "FFF9: Soft media error. Sector reassignment recommended"},
933916f3 191 {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 192 "FFF7: Media error recovered by IOA rewrite procedures"},
933916f3 193 {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 194 "FF3D: Soft PCI bus error recovered by the IOA"},
933916f3 195 {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 196 "FFF6: Device hardware error recovered by the IOA"},
933916f3 197 {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 198 "FFF6: Device hardware error recovered by the device"},
933916f3 199 {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 200 "FF3D: Soft IOA error recovered by the IOA"},
933916f3 201 {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 202 "FFFA: Undefined device response recovered by the IOA"},
933916f3 203 {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 204 "FFF6: Device bus error, message or command phase"},
933916f3 205 {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
35a39691 206 "FFFE: Task Management Function failed"},
933916f3 207 {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 208 "FFF6: Failure prediction threshold exceeded"},
933916f3 209 {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
210 "8009: Impending cache battery pack failure"},
211 {0x02040400, 0, 0,
212 "34FF: Disk device format in progress"},
213 {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
214 "9070: IOA requested reset"},
215 {0x023F0000, 0, 0,
216 "Synchronization required"},
217 {0x024E0000, 0, 0,
218 "No ready, IOA shutdown"},
219 {0x025A0000, 0, 0,
220 "Not ready, IOA has been shutdown"},
933916f3 221 {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
222 "3020: Storage subsystem configuration error"},
223 {0x03110B00, 0, 0,
224 "FFF5: Medium error, data unreadable, recommend reassign"},
225 {0x03110C00, 0, 0,
226 "7000: Medium error, data unreadable, do not reassign"},
933916f3 227 {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 228 "FFF3: Disk media format bad"},
933916f3 229 {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 230 "3002: Addressed device failed to respond to selection"},
933916f3 231 {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 232 "3100: Device bus error"},
933916f3 233 {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
234 "3109: IOA timed out a device command"},
235 {0x04088000, 0, 0,
236 "3120: SCSI bus is not operational"},
933916f3 237 {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 238 "4100: Hard device bus fabric error"},
933916f3 239 {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 240 "9000: IOA reserved area data check"},
933916f3 241 {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 242 "9001: IOA reserved area invalid data pattern"},
933916f3 243 {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 244 "9002: IOA reserved area LRC error"},
933916f3 245 {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 246 "102E: Out of alternate sectors for disk storage"},
933916f3 247 {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 248 "FFF4: Data transfer underlength error"},
933916f3 249 {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 250 "FFF4: Data transfer overlength error"},
933916f3 251 {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 252 "3400: Logical unit failure"},
933916f3 253 {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 254 "FFF4: Device microcode is corrupt"},
933916f3 255 {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
256 "8150: PCI bus error"},
257 {0x04430000, 1, 0,
258 "Unsupported device bus message received"},
933916f3 259 {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 260 "FFF4: Disk device problem"},
933916f3 261 {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 262 "8150: Permanent IOA failure"},
933916f3 263 {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 264 "3010: Disk device returned wrong response to IOA"},
933916f3 265 {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
266 "8151: IOA microcode error"},
267 {0x04448500, 0, 0,
268 "Device bus status error"},
933916f3 269 {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 270 "8157: IOA error requiring IOA reset to recover"},
271 {0x04448700, 0, 0,
272 "ATA device status error"},
273 {0x04490000, 0, 0,
274 "Message reject received from the device"},
933916f3 275 {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 276 "8008: A permanent cache battery pack failure occurred"},
933916f3 277 {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 278 "9090: Disk unit has been modified after the last known status"},
933916f3 279 {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 280 "9081: IOA detected device error"},
933916f3 281 {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 282 "9082: IOA detected device error"},
933916f3 283 {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 284 "3110: Device bus error, message or command phase"},
933916f3 285 {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
35a39691 286 "3110: SAS Command / Task Management Function failed"},
933916f3 287 {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 288 "9091: Incorrect hardware configuration change has been detected"},
933916f3 289 {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
b0df54bb 290 "9073: Invalid multi-adapter configuration"},
933916f3 291 {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 292 "4010: Incorrect connection between cascaded expanders"},
933916f3 293 {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 294 "4020: Connections exceed IOA design limits"},
933916f3 295 {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 296 "4030: Incorrect multipath connection"},
933916f3 297 {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 298 "4110: Unsupported enclosure function"},
933916f3 299 {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
300 "FFF4: Command to logical unit failed"},
301 {0x05240000, 1, 0,
302 "Illegal request, invalid request type or request packet"},
303 {0x05250000, 0, 0,
304 "Illegal request, invalid resource handle"},
b0df54bb 305 {0x05258000, 0, 0,
306 "Illegal request, commands not allowed to this device"},
307 {0x05258100, 0, 0,
308 "Illegal request, command not allowed to a secondary adapter"},
309 {0x05260000, 0, 0,
310 "Illegal request, invalid field in parameter list"},
311 {0x05260100, 0, 0,
312 "Illegal request, parameter not supported"},
313 {0x05260200, 0, 0,
314 "Illegal request, parameter value invalid"},
315 {0x052C0000, 0, 0,
316 "Illegal request, command sequence error"},
b0df54bb 317 {0x052C8000, 1, 0,
318 "Illegal request, dual adapter support not enabled"},
933916f3 319 {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 320 "9031: Array protection temporarily suspended, protection resuming"},
933916f3 321 {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 322 "9040: Array protection temporarily suspended, protection resuming"},
933916f3 323 {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 324 "3140: Device bus not ready to ready transition"},
933916f3 325 {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
326 "FFFB: SCSI bus was reset"},
327 {0x06290500, 0, 0,
328 "FFFE: SCSI bus transition to single ended"},
329 {0x06290600, 0, 0,
330 "FFFE: SCSI bus transition to LVD"},
933916f3 331 {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 332 "FFFB: SCSI bus was reset by another initiator"},
933916f3 333 {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 334 "3029: A device replacement has occurred"},
933916f3 335 {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 336 "9051: IOA cache data exists for a missing or failed device"},
933916f3 337 {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
b0df54bb 338 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
933916f3 339 {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 340 "9025: Disk unit is not supported at its physical location"},
933916f3 341 {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 342 "3020: IOA detected a SCSI bus configuration error"},
933916f3 343 {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 344 "3150: SCSI bus configuration error"},
933916f3 345 {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
b0df54bb 346 "9074: Asymmetric advanced function disk configuration"},
933916f3 347 {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 348 "4040: Incomplete multipath connection between IOA and enclosure"},
933916f3 349 {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 350 "4041: Incomplete multipath connection between enclosure and device"},
933916f3 351 {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 352 "9075: Incomplete multipath connection between IOA and remote IOA"},
933916f3 353 {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 354 "9076: Configuration error, missing remote IOA"},
933916f3 355 {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 356 "4050: Enclosure does not support a required multipath function"},
933916f3 357 {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 358 "9041: Array protection temporarily suspended"},
933916f3 359 {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 360 "9042: Corrupt array parity detected on specified device"},
933916f3 361 {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 362 "9030: Array no longer protected due to missing or failed disk unit"},
933916f3 363 {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
b0df54bb 364 "9071: Link operational transition"},
933916f3 365 {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
b0df54bb 366 "9072: Link not operational transition"},
933916f3 367 {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 368 "9032: Array exposed but still protected"},
369 {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
370 "70DD: Device forced failed by disrupt device command"},
933916f3 371 {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 372 "4061: Multipath redundancy level got better"},
933916f3 373 {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 374 "4060: Multipath redundancy level got worse"},
375 {0x07270000, 0, 0,
376 "Failure due to other device"},
933916f3 377 {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 378 "9008: IOA does not support functions expected by devices"},
933916f3 379 {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 380 "9010: Cache data associated with attached devices cannot be found"},
933916f3 381 {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 382 "9011: Cache data belongs to devices other than those attached"},
933916f3 383 {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 384 "9020: Array missing 2 or more devices with only 1 device present"},
933916f3 385 {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 386 "9021: Array missing 2 or more devices with 2 or more devices present"},
933916f3 387 {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 388 "9022: Exposed array is missing a required device"},
933916f3 389 {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 390 "9023: Array member(s) not at required physical locations"},
933916f3 391 {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 392 "9024: Array not functional due to present hardware configuration"},
933916f3 393 {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 394 "9026: Array not functional due to present hardware configuration"},
933916f3 395 {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 396 "9027: Array is missing a device and parity is out of sync"},
933916f3 397 {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 398 "9028: Maximum number of arrays already exist"},
933916f3 399 {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 400 "9050: Required cache data cannot be located for a disk unit"},
933916f3 401 {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 402 "9052: Cache data exists for a device that has been modified"},
933916f3 403 {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 404 "9054: IOA resources not available due to previous problems"},
933916f3 405 {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 406 "9092: Disk unit requires initialization before use"},
933916f3 407 {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 408 "9029: Incorrect hardware configuration change has been detected"},
933916f3 409 {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 410 "9060: One or more disk pairs are missing from an array"},
933916f3 411 {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 412 "9061: One or more disks are missing from an array"},
933916f3 413 {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 414 "9062: One or more disks are missing from an array"},
933916f3 415 {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
416 "9063: Maximum number of functional arrays has been exceeded"},
417 {0x0B260000, 0, 0,
418 "Aborted command, invalid descriptor"},
419 {0x0B5A0000, 0, 0,
420 "Command terminated by host"}
421};
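/*
 * Note (added for clarity): ipr_get_error() masks an incoming IOASC with
 * IPR_IOASC_IOASC_MASK before matching it against this table, and entry 0
 * doubles as the catch-all used for unknown IOASCs.
 */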
422
423static const struct ipr_ses_table_entry ipr_ses_table[] = {
424 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
425 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
426 { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
427 { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
428 { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
429 { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
430 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
431 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
432 { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
433 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
434 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
435 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
436 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
437};
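/*
 * Descriptive note (added): each entry above caps the SCSI bus speed, in MB/s,
 * for a known enclosure. In the compare vector an 'X' means the corresponding
 * byte of the inquiry product ID must match; any other character (e.g. '*')
 * is treated as a wildcard.
 */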
438
439/*
440 * Function Prototypes
441 */
442static int ipr_reset_alert(struct ipr_cmnd *);
443static void ipr_process_ccn(struct ipr_cmnd *);
444static void ipr_process_error(struct ipr_cmnd *);
445static void ipr_reset_ioa_job(struct ipr_cmnd *);
446static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
447 enum ipr_shutdown_type);
448
449#ifdef CONFIG_SCSI_IPR_TRACE
450/**
451 * ipr_trc_hook - Add a trace entry to the driver trace
452 * @ipr_cmd: ipr command struct
453 * @type: trace type
454 * @add_data: additional data
455 *
456 * Return value:
457 * none
458 **/
459static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
460 u8 type, u32 add_data)
461{
462 struct ipr_trace_entry *trace_entry;
463 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
464
465 trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
466 trace_entry->time = jiffies;
467 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
468 trace_entry->type = type;
469 trace_entry->ata_op_code = ipr_cmd->ioarcb.add_data.u.regs.command;
470 trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
471 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
472 trace_entry->u.add_data = add_data;
473}
474#else
475#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
476#endif
477
478/**
479 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
480 * @ipr_cmd: ipr command struct
481 *
482 * Return value:
483 * none
484 **/
485static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
486{
487 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
488 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
51b1c7e1 489 dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);
490
491 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
492 ioarcb->write_data_transfer_length = 0;
493 ioarcb->read_data_transfer_length = 0;
494 ioarcb->write_ioadl_len = 0;
495 ioarcb->read_ioadl_len = 0;
496 ioarcb->write_ioadl_addr =
497 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
498 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
499 ioasa->ioasc = 0;
500 ioasa->residual_data_len = 0;
35a39691 501 ioasa->u.gata.status = 0;
502
503 ipr_cmd->scsi_cmd = NULL;
35a39691 504 ipr_cmd->qc = NULL;
505 ipr_cmd->sense_buffer[0] = 0;
506 ipr_cmd->dma_use_sg = 0;
507}
508
509/**
510 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
511 * @ipr_cmd: ipr command struct
512 *
513 * Return value:
514 * none
515 **/
516static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
517{
518 ipr_reinit_ipr_cmnd(ipr_cmd);
519 ipr_cmd->u.scratch = 0;
520 ipr_cmd->sibling = NULL;
521 init_timer(&ipr_cmd->timer);
522}
523
524/**
525 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
526 * @ioa_cfg: ioa config struct
527 *
528 * Return value:
529 * pointer to ipr command struct
530 **/
531static
532struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
533{
534 struct ipr_cmnd *ipr_cmd;
535
536 ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
537 list_del(&ipr_cmd->queue);
538 ipr_init_ipr_cmnd(ipr_cmd);
539
540 return ipr_cmd;
541}
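/*
 * Note (added): the free queue is assumed to be non-empty here; the driver
 * sizes its command block pool so that a block is available whenever this
 * routine is called.
 */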
542
543/**
544 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
545 * @ioa_cfg: ioa config struct
546 * @clr_ints: interrupts to clear
547 *
548 * This function masks all interrupts on the adapter, then clears the
549 * interrupts specified in the mask
550 *
551 * Return value:
552 * none
553 **/
554static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
555 u32 clr_ints)
556{
557 volatile u32 int_reg;
558
559 /* Stop new interrupts */
560 ioa_cfg->allow_interrupts = 0;
561
562 /* Set interrupt mask to stop all new interrupts */
563 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
564
565 /* Clear any pending interrupts */
566 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
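	/* Read back from the adapter so the mask/clear writes above are flushed */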
567 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
568}
569
570/**
571 * ipr_save_pcix_cmd_reg - Save PCI-X command register
572 * @ioa_cfg: ioa config struct
573 *
574 * Return value:
575 * 0 on success / -EIO on failure
576 **/
577static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
578{
579 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
580
581 if (pcix_cmd_reg == 0)
582 return 0;
583
584 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
585 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
586 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
587 return -EIO;
588 }
589
590 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
591 return 0;
592}
593
594/**
595 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
596 * @ioa_cfg: ioa config struct
597 *
598 * Return value:
599 * 0 on success / -EIO on failure
600 **/
601static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
602{
603 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
604
605 if (pcix_cmd_reg) {
606 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
607 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
608 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
609 return -EIO;
610 }
611 }
612
613 return 0;
614}
615
616/**
617 * ipr_sata_eh_done - done function for aborted SATA commands
618 * @ipr_cmd: ipr command struct
619 *
620 * This function is invoked for ops generated to SATA
621 * devices which are being aborted.
622 *
623 * Return value:
624 * none
625 **/
626static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
627{
628 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
629 struct ata_queued_cmd *qc = ipr_cmd->qc;
630 struct ipr_sata_port *sata_port = qc->ap->private_data;
631
632 qc->err_mask |= AC_ERR_OTHER;
633 sata_port->ioasa.status |= ATA_BUSY;
634 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
635 ata_qc_complete(qc);
636}
637
638/**
639 * ipr_scsi_eh_done - mid-layer done function for aborted ops
640 * @ipr_cmd: ipr command struct
641 *
642 * This function is invoked by the interrupt handler for
643 * ops generated by the SCSI mid-layer which are being aborted.
644 *
645 * Return value:
646 * none
647 **/
648static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
649{
650 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
651 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
652
653 scsi_cmd->result |= (DID_ERROR << 16);
654
63015bc9 655 scsi_dma_unmap(ipr_cmd->scsi_cmd);
656 scsi_cmd->scsi_done(scsi_cmd);
657 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
658}
659
660/**
661 * ipr_fail_all_ops - Fails all outstanding ops.
662 * @ioa_cfg: ioa config struct
663 *
664 * This function fails all outstanding ops.
665 *
666 * Return value:
667 * none
668 **/
669static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
670{
671 struct ipr_cmnd *ipr_cmd, *temp;
672
673 ENTER;
674 list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
675 list_del(&ipr_cmd->queue);
676
677 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
678 ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);
679
680 if (ipr_cmd->scsi_cmd)
681 ipr_cmd->done = ipr_scsi_eh_done;
682 else if (ipr_cmd->qc)
683 ipr_cmd->done = ipr_sata_eh_done;
684
685 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
686 del_timer(&ipr_cmd->timer);
687 ipr_cmd->done(ipr_cmd);
688 }
689
690 LEAVE;
691}
692
693/**
694 * ipr_do_req - Send driver initiated requests.
695 * @ipr_cmd: ipr command struct
696 * @done: done function
697 * @timeout_func: timeout function
698 * @timeout: timeout value
699 *
700 * This function sends the specified command to the adapter with the
701 * timeout given. The done function is invoked on command completion.
702 *
703 * Return value:
704 * none
705 **/
706static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
707 void (*done) (struct ipr_cmnd *),
708 void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
709{
710 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
711
712 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
713
714 ipr_cmd->done = done;
715
716 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
717 ipr_cmd->timer.expires = jiffies + timeout;
718 ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
719
720 add_timer(&ipr_cmd->timer);
721
722 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
723
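	/* Make sure the IOARCB is committed to memory before the adapter is told to fetch it */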
724 mb();
725 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
726 ioa_cfg->regs.ioarrin_reg);
727}
728
729/**
730 * ipr_internal_cmd_done - Op done function for an internally generated op.
731 * @ipr_cmd: ipr command struct
732 *
733 * This function is the op done function for an internally generated,
734 * blocking op. It simply wakes the sleeping thread.
735 *
736 * Return value:
737 * none
738 **/
739static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
740{
741 if (ipr_cmd->sibling)
742 ipr_cmd->sibling = NULL;
743 else
744 complete(&ipr_cmd->completion);
745}
746
747/**
748 * ipr_send_blocking_cmd - Send command and sleep on its completion.
749 * @ipr_cmd: ipr command struct
750 * @timeout_func: function to invoke if command times out
751 * @timeout: timeout
752 *
753 * Return value:
754 * none
755 **/
756static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
757 void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
758 u32 timeout)
759{
760 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
761
762 init_completion(&ipr_cmd->completion);
763 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
764
765 spin_unlock_irq(ioa_cfg->host->host_lock);
766 wait_for_completion(&ipr_cmd->completion);
767 spin_lock_irq(ioa_cfg->host->host_lock);
768}
769
770/**
771 * ipr_send_hcam - Send an HCAM to the adapter.
772 * @ioa_cfg: ioa config struct
773 * @type: HCAM type
774 * @hostrcb: hostrcb struct
775 *
776 * This function will send a Host Controlled Async command to the adapter.
777 * If HCAMs are currently not allowed to be issued to the adapter, it will
778 * place the hostrcb on the free queue.
779 *
780 * Return value:
781 * none
782 **/
783static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
784 struct ipr_hostrcb *hostrcb)
785{
786 struct ipr_cmnd *ipr_cmd;
787 struct ipr_ioarcb *ioarcb;
788
789 if (ioa_cfg->allow_cmds) {
790 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
791 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
792 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
793
794 ipr_cmd->u.hostrcb = hostrcb;
795 ioarcb = &ipr_cmd->ioarcb;
796
797 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
798 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
799 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
800 ioarcb->cmd_pkt.cdb[1] = type;
801 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
802 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
803
804 ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
805 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
806 ipr_cmd->ioadl[0].flags_and_data_len =
807 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
808 ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);
809
810 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
811 ipr_cmd->done = ipr_process_ccn;
812 else
813 ipr_cmd->done = ipr_process_error;
814
815 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
816
817 mb();
818 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
819 ioa_cfg->regs.ioarrin_reg);
820 } else {
821 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
822 }
823}
824
825/**
826 * ipr_init_res_entry - Initialize a resource entry struct.
827 * @res: resource entry struct
828 *
829 * Return value:
830 * none
831 **/
832static void ipr_init_res_entry(struct ipr_resource_entry *res)
833{
ee0a90fa 834 res->needs_sync_complete = 0;
835 res->in_erp = 0;
836 res->add_to_ml = 0;
837 res->del_from_ml = 0;
838 res->resetting_device = 0;
839 res->sdev = NULL;
35a39691 840 res->sata_port = NULL;
841}
842
843/**
844 * ipr_handle_config_change - Handle a config change from the adapter
845 * @ioa_cfg: ioa config struct
846 * @hostrcb: hostrcb
847 *
848 * Return value:
849 * none
850 **/
851static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
852 struct ipr_hostrcb *hostrcb)
853{
854 struct ipr_resource_entry *res = NULL;
855 struct ipr_config_table_entry *cfgte;
856 u32 is_ndn = 1;
857
858 cfgte = &hostrcb->hcam.u.ccn.cfgte;
859
860 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
861 if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
862 sizeof(cfgte->res_addr))) {
863 is_ndn = 0;
864 break;
865 }
866 }
867
868 if (is_ndn) {
869 if (list_empty(&ioa_cfg->free_res_q)) {
870 ipr_send_hcam(ioa_cfg,
871 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
872 hostrcb);
873 return;
874 }
875
876 res = list_entry(ioa_cfg->free_res_q.next,
877 struct ipr_resource_entry, queue);
878
879 list_del(&res->queue);
880 ipr_init_res_entry(res);
881 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
882 }
883
884 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
885
886 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
887 if (res->sdev) {
1da177e4 888 res->del_from_ml = 1;
1121b794 889 res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
890 if (ioa_cfg->allow_ml_add_del)
891 schedule_work(&ioa_cfg->work_q);
892 } else
893 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
894 } else if (!res->sdev) {
895 res->add_to_ml = 1;
896 if (ioa_cfg->allow_ml_add_del)
897 schedule_work(&ioa_cfg->work_q);
898 }
899
900 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
901}
902
903/**
904 * ipr_process_ccn - Op done function for a CCN.
905 * @ipr_cmd: ipr command struct
906 *
907 * This function is the op done function for a configuration
908 * change notification host controlled async from the adapter.
909 *
910 * Return value:
911 * none
912 **/
913static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
914{
915 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
916 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
917 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
918
919 list_del(&hostrcb->queue);
920 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
921
922 if (ioasc) {
923 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
924 dev_err(&ioa_cfg->pdev->dev,
925 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
926
927 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
928 } else {
929 ipr_handle_config_change(ioa_cfg, hostrcb);
930 }
931}
932
933/**
934 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
935 * @i: index into buffer
936 * @buf: string to modify
937 *
938 * This function will strip all trailing whitespace, pad the end
939 * of the string with a single space, and NULL terminate the string.
940 *
941 * Return value:
942 * new length of string
943 **/
944static int strip_and_pad_whitespace(int i, char *buf)
945{
946 while (i && buf[i] == ' ')
947 i--;
948 buf[i+1] = ' ';
949 buf[i+2] = '\0';
950 return i + 2;
951}
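/*
 * Worked example (added for illustration): with buf = "IBM     " and i = 7
 * (the last byte of an 8-byte vendor ID field), the loop backs up to the 'M'
 * at index 2, writes a space at index 3 and a NUL at index 4, and returns 4,
 * the offset at which the caller appends the next field (overwriting the NUL).
 */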
952
953/**
954 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
955 * @prefix: string to print at start of printk
956 * @hostrcb: hostrcb pointer
957 * @vpd: vendor/product id/sn struct
958 *
959 * Return value:
960 * none
961 **/
962static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
963 struct ipr_vpd *vpd)
964{
965 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
966 int i = 0;
967
968 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
969 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
970
971 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
972 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
973
974 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
975 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
976
977 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
978}
979
980/**
981 * ipr_log_vpd - Log the passed VPD to the error log.
cfc32139 982 * @vpd: vendor/product id/sn struct
983 *
984 * Return value:
985 * none
986 **/
cfc32139 987static void ipr_log_vpd(struct ipr_vpd *vpd)
988{
989 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
990 + IPR_SERIAL_NUM_LEN];
991
cfc32139 992 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
993 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
994 IPR_PROD_ID_LEN);
995 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
996 ipr_err("Vendor/Product ID: %s\n", buffer);
997
cfc32139 998 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
999 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1000 ipr_err(" Serial Number: %s\n", buffer);
1001}
1002
1003/**
1004 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1005 * @prefix: string to print at start of printk
1006 * @hostrcb: hostrcb pointer
1007 * @vpd: vendor/product id/sn/wwn struct
1008 *
1009 * Return value:
1010 * none
1011 **/
1012static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1013 struct ipr_ext_vpd *vpd)
1014{
1015 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1016 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1017 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1018}
1019
ee0f05b8 1020/**
1021 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1022 * @vpd: vendor/product id/sn/wwn struct
1023 *
1024 * Return value:
1025 * none
1026 **/
1027static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1028{
1029 ipr_log_vpd(&vpd->vpd);
1030 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1031 be32_to_cpu(vpd->wwid[1]));
1032}
1033
1034/**
1035 * ipr_log_enhanced_cache_error - Log a cache error.
1036 * @ioa_cfg: ioa config struct
1037 * @hostrcb: hostrcb struct
1038 *
1039 * Return value:
1040 * none
1041 **/
1042static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1043 struct ipr_hostrcb *hostrcb)
1044{
1045 struct ipr_hostrcb_type_12_error *error =
1046 &hostrcb->hcam.u.error.u.type_12_error;
1047
1048 ipr_err("-----Current Configuration-----\n");
1049 ipr_err("Cache Directory Card Information:\n");
1050 ipr_log_ext_vpd(&error->ioa_vpd);
1051 ipr_err("Adapter Card Information:\n");
1052 ipr_log_ext_vpd(&error->cfc_vpd);
1053
1054 ipr_err("-----Expected Configuration-----\n");
1055 ipr_err("Cache Directory Card Information:\n");
1056 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1057 ipr_err("Adapter Card Information:\n");
1058 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1059
1060 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1061 be32_to_cpu(error->ioa_data[0]),
1062 be32_to_cpu(error->ioa_data[1]),
1063 be32_to_cpu(error->ioa_data[2]));
1064}
1065
1066/**
1067 * ipr_log_cache_error - Log a cache error.
1068 * @ioa_cfg: ioa config struct
1069 * @hostrcb: hostrcb struct
1070 *
1071 * Return value:
1072 * none
1073 **/
1074static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1075 struct ipr_hostrcb *hostrcb)
1076{
1077 struct ipr_hostrcb_type_02_error *error =
1078 &hostrcb->hcam.u.error.u.type_02_error;
1079
1080 ipr_err("-----Current Configuration-----\n");
1081 ipr_err("Cache Directory Card Information:\n");
cfc32139 1082 ipr_log_vpd(&error->ioa_vpd);
1da177e4 1083 ipr_err("Adapter Card Information:\n");
cfc32139 1084 ipr_log_vpd(&error->cfc_vpd);
1085
1086 ipr_err("-----Expected Configuration-----\n");
1087 ipr_err("Cache Directory Card Information:\n");
cfc32139 1088 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1da177e4 1089 ipr_err("Adapter Card Information:\n");
cfc32139 1090 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1091
1092 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1093 be32_to_cpu(error->ioa_data[0]),
1094 be32_to_cpu(error->ioa_data[1]),
1095 be32_to_cpu(error->ioa_data[2]));
1096}
1097
ee0f05b8 1098/**
1099 * ipr_log_enhanced_config_error - Log a configuration error.
1100 * @ioa_cfg: ioa config struct
1101 * @hostrcb: hostrcb struct
1102 *
1103 * Return value:
1104 * none
1105 **/
1106static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1107 struct ipr_hostrcb *hostrcb)
1108{
1109 int errors_logged, i;
1110 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1111 struct ipr_hostrcb_type_13_error *error;
1112
1113 error = &hostrcb->hcam.u.error.u.type_13_error;
1114 errors_logged = be32_to_cpu(error->errors_logged);
1115
1116 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1117 be32_to_cpu(error->errors_detected), errors_logged);
1118
1119 dev_entry = error->dev;
1120
1121 for (i = 0; i < errors_logged; i++, dev_entry++) {
1122 ipr_err_separator;
1123
1124 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1125 ipr_log_ext_vpd(&dev_entry->vpd);
1126
1127 ipr_err("-----New Device Information-----\n");
1128 ipr_log_ext_vpd(&dev_entry->new_vpd);
1129
1130 ipr_err("Cache Directory Card Information:\n");
1131 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1132
1133 ipr_err("Adapter Card Information:\n");
1134 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1135 }
1136}
1137
1138/**
1139 * ipr_log_config_error - Log a configuration error.
1140 * @ioa_cfg: ioa config struct
1141 * @hostrcb: hostrcb struct
1142 *
1143 * Return value:
1144 * none
1145 **/
1146static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1147 struct ipr_hostrcb *hostrcb)
1148{
1149 int errors_logged, i;
1150 struct ipr_hostrcb_device_data_entry *dev_entry;
1151 struct ipr_hostrcb_type_03_error *error;
1152
1153 error = &hostrcb->hcam.u.error.u.type_03_error;
1154 errors_logged = be32_to_cpu(error->errors_logged);
1155
1156 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1157 be32_to_cpu(error->errors_detected), errors_logged);
1158
cfc32139 1159 dev_entry = error->dev;
1160
1161 for (i = 0; i < errors_logged; i++, dev_entry++) {
1162 ipr_err_separator;
1163
fa15b1f6 1164 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
cfc32139 1165 ipr_log_vpd(&dev_entry->vpd);
1166
1167 ipr_err("-----New Device Information-----\n");
cfc32139 1168 ipr_log_vpd(&dev_entry->new_vpd);
1169
1170 ipr_err("Cache Directory Card Information:\n");
cfc32139 1171 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1172
1173 ipr_err("Adapter Card Information:\n");
cfc32139 1174 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1175
1176 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1177 be32_to_cpu(dev_entry->ioa_data[0]),
1178 be32_to_cpu(dev_entry->ioa_data[1]),
1179 be32_to_cpu(dev_entry->ioa_data[2]),
1180 be32_to_cpu(dev_entry->ioa_data[3]),
1181 be32_to_cpu(dev_entry->ioa_data[4]));
1182 }
1183}
1184
ee0f05b8 1185/**
1186 * ipr_log_enhanced_array_error - Log an array configuration error.
1187 * @ioa_cfg: ioa config struct
1188 * @hostrcb: hostrcb struct
1189 *
1190 * Return value:
1191 * none
1192 **/
1193static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1194 struct ipr_hostrcb *hostrcb)
1195{
1196 int i, num_entries;
1197 struct ipr_hostrcb_type_14_error *error;
1198 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1199 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1200
1201 error = &hostrcb->hcam.u.error.u.type_14_error;
1202
1203 ipr_err_separator;
1204
1205 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1206 error->protection_level,
1207 ioa_cfg->host->host_no,
1208 error->last_func_vset_res_addr.bus,
1209 error->last_func_vset_res_addr.target,
1210 error->last_func_vset_res_addr.lun);
1211
1212 ipr_err_separator;
1213
1214 array_entry = error->array_member;
1215 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1216 sizeof(error->array_member));
1217
1218 for (i = 0; i < num_entries; i++, array_entry++) {
1219 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1220 continue;
1221
1222 if (be32_to_cpu(error->exposed_mode_adn) == i)
1223 ipr_err("Exposed Array Member %d:\n", i);
1224 else
1225 ipr_err("Array Member %d:\n", i);
1226
1227 ipr_log_ext_vpd(&array_entry->vpd);
1228 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1229 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1230 "Expected Location");
1231
1232 ipr_err_separator;
1233 }
1234}
1235
1236/**
1237 * ipr_log_array_error - Log an array configuration error.
1238 * @ioa_cfg: ioa config struct
1239 * @hostrcb: hostrcb struct
1240 *
1241 * Return value:
1242 * none
1243 **/
1244static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1245 struct ipr_hostrcb *hostrcb)
1246{
1247 int i;
1248 struct ipr_hostrcb_type_04_error *error;
1249 struct ipr_hostrcb_array_data_entry *array_entry;
1250 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1251
1252 error = &hostrcb->hcam.u.error.u.type_04_error;
1253
1254 ipr_err_separator;
1255
1256 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1257 error->protection_level,
1258 ioa_cfg->host->host_no,
1259 error->last_func_vset_res_addr.bus,
1260 error->last_func_vset_res_addr.target,
1261 error->last_func_vset_res_addr.lun);
1262
1263 ipr_err_separator;
1264
1265 array_entry = error->array_member;
1266
1267 for (i = 0; i < 18; i++) {
cfc32139 1268 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1269 continue;
1270
fa15b1f6 1271 if (be32_to_cpu(error->exposed_mode_adn) == i)
1da177e4 1272 ipr_err("Exposed Array Member %d:\n", i);
fa15b1f6 1273 else
1da177e4 1274 ipr_err("Array Member %d:\n", i);
1da177e4 1275
cfc32139 1276 ipr_log_vpd(&array_entry->vpd);
1da177e4 1277
fa15b1f6 1278 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1279 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1280 "Expected Location");
1281
1282 ipr_err_separator;
1283
1284 if (i == 9)
1285 array_entry = error->array_member2;
1286 else
1287 array_entry++;
1288 }
1289}
1290
1291/**
b0df54bb 1292 * ipr_log_hex_data - Log additional hex IOA error data.
ac719aba 1293 * @ioa_cfg: ioa config struct
b0df54bb 1294 * @data: IOA error data
1295 * @len: data length
1296 *
1297 * Return value:
1298 * none
1299 **/
ac719aba 1300static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1301{
1302 int i;
1da177e4 1303
b0df54bb 1304 if (len == 0)
1305 return;
1306
1307 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1308 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1309
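	/* Each row printed below covers 16 bytes (four big-endian 32-bit words);
	   i counts 32-bit words, so i*4 is the byte offset shown at the start of the row. */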
b0df54bb 1310 for (i = 0; i < len / 4; i += 4) {
1da177e4 1311 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
b0df54bb 1312 be32_to_cpu(data[i]),
1313 be32_to_cpu(data[i+1]),
1314 be32_to_cpu(data[i+2]),
1315 be32_to_cpu(data[i+3]));
1316 }
1317}
1318
ee0f05b8 1319/**
1320 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1321 * @ioa_cfg: ioa config struct
1322 * @hostrcb: hostrcb struct
1323 *
1324 * Return value:
1325 * none
1326 **/
1327static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1328 struct ipr_hostrcb *hostrcb)
1329{
1330 struct ipr_hostrcb_type_17_error *error;
1331
1332 error = &hostrcb->hcam.u.error.u.type_17_error;
1333 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
8cf093e2 1334 strstrip(error->failure_reason);
ee0f05b8 1335
1336 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1337 be32_to_cpu(hostrcb->hcam.u.error.prc));
1338 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
ac719aba 1339 ipr_log_hex_data(ioa_cfg, error->data,
ee0f05b8 1340 be32_to_cpu(hostrcb->hcam.length) -
1341 (offsetof(struct ipr_hostrcb_error, u) +
1342 offsetof(struct ipr_hostrcb_type_17_error, data)));
1343}
1344
b0df54bb 1345/**
1346 * ipr_log_dual_ioa_error - Log a dual adapter error.
1347 * @ioa_cfg: ioa config struct
1348 * @hostrcb: hostrcb struct
1349 *
1350 * Return value:
1351 * none
1352 **/
1353static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1354 struct ipr_hostrcb *hostrcb)
1355{
1356 struct ipr_hostrcb_type_07_error *error;
1357
1358 error = &hostrcb->hcam.u.error.u.type_07_error;
1359 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
8cf093e2 1360 strstrip(error->failure_reason);
b0df54bb 1361
1362 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1363 be32_to_cpu(hostrcb->hcam.u.error.prc));
1364 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
ac719aba 1365 ipr_log_hex_data(ioa_cfg, error->data,
b0df54bb 1366 be32_to_cpu(hostrcb->hcam.length) -
1367 (offsetof(struct ipr_hostrcb_error, u) +
1368 offsetof(struct ipr_hostrcb_type_07_error, data)));
1369}
1370
1371static const struct {
1372 u8 active;
1373 char *desc;
1374} path_active_desc[] = {
1375 { IPR_PATH_NO_INFO, "Path" },
1376 { IPR_PATH_ACTIVE, "Active path" },
1377 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1378};
1379
1380static const struct {
1381 u8 state;
1382 char *desc;
1383} path_state_desc[] = {
1384 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1385 { IPR_PATH_HEALTHY, "is healthy" },
1386 { IPR_PATH_DEGRADED, "is degraded" },
1387 { IPR_PATH_FAILED, "is failed" }
1388};
1389
1390/**
1391 * ipr_log_fabric_path - Log a fabric path error
1392 * @hostrcb: hostrcb struct
1393 * @fabric: fabric descriptor
1394 *
1395 * Return value:
1396 * none
1397 **/
1398static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1399 struct ipr_hostrcb_fabric_desc *fabric)
1400{
1401 int i, j;
1402 u8 path_state = fabric->path_state;
1403 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1404 u8 state = path_state & IPR_PATH_STATE_MASK;
1405
1406 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1407 if (path_active_desc[i].active != active)
1408 continue;
1409
1410 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1411 if (path_state_desc[j].state != state)
1412 continue;
1413
1414 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1415 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1416 path_active_desc[i].desc, path_state_desc[j].desc,
1417 fabric->ioa_port);
1418 } else if (fabric->cascaded_expander == 0xff) {
1419 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1420 path_active_desc[i].desc, path_state_desc[j].desc,
1421 fabric->ioa_port, fabric->phy);
1422 } else if (fabric->phy == 0xff) {
1423 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1424 path_active_desc[i].desc, path_state_desc[j].desc,
1425 fabric->ioa_port, fabric->cascaded_expander);
1426 } else {
1427 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1428 path_active_desc[i].desc, path_state_desc[j].desc,
1429 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1430 }
1431 return;
1432 }
1433 }
1434
1435 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1436 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1437}
1438
1439static const struct {
1440 u8 type;
1441 char *desc;
1442} path_type_desc[] = {
1443 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
1444 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
1445 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
1446 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
1447};
1448
1449static const struct {
1450 u8 status;
1451 char *desc;
1452} path_status_desc[] = {
1453 { IPR_PATH_CFG_NO_PROB, "Functional" },
1454 { IPR_PATH_CFG_DEGRADED, "Degraded" },
1455 { IPR_PATH_CFG_FAILED, "Failed" },
1456 { IPR_PATH_CFG_SUSPECT, "Suspect" },
1457 { IPR_PATH_NOT_DETECTED, "Missing" },
1458 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
1459};
1460
1461static const char *link_rate[] = {
1462 "unknown",
1463 "disabled",
1464 "phy reset problem",
1465 "spinup hold",
1466 "port selector",
1467 "unknown",
1468 "unknown",
1469 "unknown",
1470 "1.5Gbps",
1471 "3.0Gbps",
1472 "unknown",
1473 "unknown",
1474 "unknown",
1475 "unknown",
1476 "unknown",
1477 "unknown"
1478};
1479
1480/**
1481 * ipr_log_path_elem - Log a fabric path element.
1482 * @hostrcb: hostrcb struct
1483 * @cfg: fabric path element struct
1484 *
1485 * Return value:
1486 * none
1487 **/
1488static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
1489 struct ipr_hostrcb_config_element *cfg)
1490{
1491 int i, j;
1492 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
1493 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
1494
1495 if (type == IPR_PATH_CFG_NOT_EXIST)
1496 return;
1497
1498 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
1499 if (path_type_desc[i].type != type)
1500 continue;
1501
1502 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
1503 if (path_status_desc[j].status != status)
1504 continue;
1505
1506 if (type == IPR_PATH_CFG_IOA_PORT) {
1507 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
1508 path_status_desc[j].desc, path_type_desc[i].desc,
1509 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1510 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1511 } else {
1512 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
1513 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
1514 path_status_desc[j].desc, path_type_desc[i].desc,
1515 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1516 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1517 } else if (cfg->cascaded_expander == 0xff) {
1518 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
1519 "WWN=%08X%08X\n", path_status_desc[j].desc,
1520 path_type_desc[i].desc, cfg->phy,
1521 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1522 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1523 } else if (cfg->phy == 0xff) {
1524 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
1525 "WWN=%08X%08X\n", path_status_desc[j].desc,
1526 path_type_desc[i].desc, cfg->cascaded_expander,
1527 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1528 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1529 } else {
1530 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
1531 "WWN=%08X%08X\n", path_status_desc[j].desc,
1532 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
1533 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1534 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1535 }
1536 }
1537 return;
1538 }
1539 }
1540
1541 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
1542 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
1543 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1544 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1545}
1546
1547/**
1548 * ipr_log_fabric_error - Log a fabric error.
1549 * @ioa_cfg: ioa config struct
1550 * @hostrcb: hostrcb struct
1551 *
1552 * Return value:
1553 * none
1554 **/
1555static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
1556 struct ipr_hostrcb *hostrcb)
1557{
1558 struct ipr_hostrcb_type_20_error *error;
1559 struct ipr_hostrcb_fabric_desc *fabric;
1560 struct ipr_hostrcb_config_element *cfg;
1561 int i, add_len;
1562
1563 error = &hostrcb->hcam.u.error.u.type_20_error;
1564 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1565 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
1566
1567 add_len = be32_to_cpu(hostrcb->hcam.length) -
1568 (offsetof(struct ipr_hostrcb_error, u) +
1569 offsetof(struct ipr_hostrcb_type_20_error, desc));
1570
1571 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
1572 ipr_log_fabric_path(hostrcb, fabric);
1573 for_each_fabric_cfg(fabric, cfg)
1574 ipr_log_path_elem(hostrcb, cfg);
1575
1576 add_len -= be16_to_cpu(fabric->length);
1577 fabric = (struct ipr_hostrcb_fabric_desc *)
1578 ((unsigned long)fabric + be16_to_cpu(fabric->length));
1579 }
1580
ac719aba 1581 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
1582}
1583
b0df54bb 1584/**
1585 * ipr_log_generic_error - Log an adapter error.
1586 * @ioa_cfg: ioa config struct
1587 * @hostrcb: hostrcb struct
1588 *
1589 * Return value:
1590 * none
1591 **/
1592static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
1593 struct ipr_hostrcb *hostrcb)
1594{
ac719aba 1595 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
b0df54bb 1596 be32_to_cpu(hostrcb->hcam.length));
1597}
1598
1599/**
1600 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
1601 * @ioasc: IOASC
1602 *
1603 * This function will return the index into the ipr_error_table
1604 * for the specified IOASC. If the IOASC is not in the table,
1605 * 0 will be returned, which points to the entry used for unknown errors.
1606 *
1607 * Return value:
1608 * index into the ipr_error_table
1609 **/
1610static u32 ipr_get_error(u32 ioasc)
1611{
1612 int i;
1613
1614 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
35a39691 1615 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
1616 return i;
1617
1618 return 0;
1619}
1620
1621/**
1622 * ipr_handle_log_data - Log an adapter error.
1623 * @ioa_cfg: ioa config struct
1624 * @hostrcb: hostrcb struct
1625 *
1626 * This function logs an adapter error to the system.
1627 *
1628 * Return value:
1629 * none
1630 **/
1631static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
1632 struct ipr_hostrcb *hostrcb)
1633{
1634 u32 ioasc;
1635 int error_index;
1636
1637 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
1638 return;
1639
1640 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
1641 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
1642
1643 ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
1644
1645 if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
1646 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
1647 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
1648 scsi_report_bus_reset(ioa_cfg->host,
1649 hostrcb->hcam.u.error.failing_dev_res_addr.bus);
1650 }
1651
1652 error_index = ipr_get_error(ioasc);
1653
1654 if (!ipr_error_table[error_index].log_hcam)
1655 return;
1656
49dc6a18 1657 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
1658
1659 /* Set indication we have logged an error */
1660 ioa_cfg->errors_logged++;
1661
933916f3 1662 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
1da177e4 1663 return;
cf852037 1664 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
1665 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
1666
1667 switch (hostrcb->hcam.overlay_id) {
1668 case IPR_HOST_RCB_OVERLAY_ID_2:
1669 ipr_log_cache_error(ioa_cfg, hostrcb);
1670 break;
1671 case IPR_HOST_RCB_OVERLAY_ID_3:
1672 ipr_log_config_error(ioa_cfg, hostrcb);
1673 break;
1674 case IPR_HOST_RCB_OVERLAY_ID_4:
1675 case IPR_HOST_RCB_OVERLAY_ID_6:
1676 ipr_log_array_error(ioa_cfg, hostrcb);
1677 break;
b0df54bb 1678 case IPR_HOST_RCB_OVERLAY_ID_7:
1679 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
1680 break;
ee0f05b8 1681 case IPR_HOST_RCB_OVERLAY_ID_12:
1682 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
1683 break;
1684 case IPR_HOST_RCB_OVERLAY_ID_13:
1685 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
1686 break;
1687 case IPR_HOST_RCB_OVERLAY_ID_14:
1688 case IPR_HOST_RCB_OVERLAY_ID_16:
1689 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
1690 break;
1691 case IPR_HOST_RCB_OVERLAY_ID_17:
1692 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
1693 break;
1694 case IPR_HOST_RCB_OVERLAY_ID_20:
1695 ipr_log_fabric_error(ioa_cfg, hostrcb);
1696 break;
cf852037 1697 case IPR_HOST_RCB_OVERLAY_ID_1:
1da177e4 1698 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1da177e4 1699 default:
a9cfca96 1700 ipr_log_generic_error(ioa_cfg, hostrcb);
1701 break;
1702 }
1703}
1704
1705/**
1706 * ipr_process_error - Op done function for an adapter error log.
1707 * @ipr_cmd: ipr command struct
1708 *
1709 * This function is the op done function for an error log host
1710 * controlled async from the adapter. It will log the error and
1711 * send the HCAM back to the adapter.
1712 *
1713 * Return value:
1714 * none
1715 **/
1716static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
1717{
1718 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1719 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1720 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
65f56475 1721 u32 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
1722
1723 list_del(&hostrcb->queue);
1724 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1725
1726 if (!ioasc) {
1727 ipr_handle_log_data(ioa_cfg, hostrcb);
1728 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
1729 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
1730 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
1731 dev_err(&ioa_cfg->pdev->dev,
1732 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1733 }
1734
1735 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
1736}
1737
1738/**
1739 * ipr_timeout - An internally generated op has timed out.
1740 * @ipr_cmd: ipr command struct
1741 *
1742 * This function blocks host requests and initiates an
1743 * adapter reset.
1744 *
1745 * Return value:
1746 * none
1747 **/
1748static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
1749{
1750 unsigned long lock_flags = 0;
1751 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1752
1753 ENTER;
1754 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1755
1756 ioa_cfg->errors_logged++;
1757 dev_err(&ioa_cfg->pdev->dev,
1758 "Adapter being reset due to command timeout.\n");
1759
1760 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1761 ioa_cfg->sdt_state = GET_DUMP;
1762
1763 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
1764 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1765
1766 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1767 LEAVE;
1768}
1769
1770/**
1771 * ipr_oper_timeout - Adapter timed out transitioning to operational
1772 * @ipr_cmd: ipr command struct
1773 *
1774 * This function blocks host requests and initiates an
1775 * adapter reset.
1776 *
1777 * Return value:
1778 * none
1779 **/
1780static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
1781{
1782 unsigned long lock_flags = 0;
1783 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1784
1785 ENTER;
1786 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1787
1788 ioa_cfg->errors_logged++;
1789 dev_err(&ioa_cfg->pdev->dev,
1790 "Adapter timed out transitioning to operational.\n");
1791
1792 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1793 ioa_cfg->sdt_state = GET_DUMP;
1794
1795 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
1796 if (ipr_fastfail)
1797 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
1798 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1799 }
1800
1801 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1802 LEAVE;
1803}
1804
1805/**
1806 * ipr_reset_reload - Reset/Reload the IOA
1807 * @ioa_cfg: ioa config struct
1808 * @shutdown_type: shutdown type
1809 *
1810 * This function resets the adapter and re-initializes it.
1811 * This function assumes that all new host commands have been stopped.
1812 * Return value:
1813 * SUCCESS / FAILED
1814 **/
1815static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
1816 enum ipr_shutdown_type shutdown_type)
1817{
1818 if (!ioa_cfg->in_reset_reload)
1819 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
1820
1821 spin_unlock_irq(ioa_cfg->host->host_lock);
1822 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
1823 spin_lock_irq(ioa_cfg->host->host_lock);
1824
1825 /* If we got hit with a host reset while we were already resetting
1826 the adapter for some reason, and that reset failed, the IOA is now dead. */
1827 if (ioa_cfg->ioa_is_dead) {
1828 ipr_trace;
1829 return FAILED;
1830 }
1831
1832 return SUCCESS;
1833}
1834
1835/**
1836 * ipr_find_ses_entry - Find matching SES in SES table
1837 * @res: resource entry struct of SES
1838 *
1839 * Return value:
1840 * pointer to SES table entry / NULL on failure
1841 **/
1842static const struct ipr_ses_table_entry *
1843ipr_find_ses_entry(struct ipr_resource_entry *res)
1844{
1845 int i, j, matches;
1846 const struct ipr_ses_table_entry *ste = ipr_ses_table;
1847
1848 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
1849 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
1850 if (ste->compare_product_id_byte[j] == 'X') {
1851 if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
1852 matches++;
1853 else
1854 break;
1855 } else
1856 matches++;
1857 }
1858
1859 if (matches == IPR_PROD_ID_LEN)
1860 return ste;
1861 }
1862
1863 return NULL;
1864}
1865
1866/**
1867 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
1868 * @ioa_cfg: ioa config struct
1869 * @bus: SCSI bus
1870 * @bus_width: bus width
1871 *
1872 * Return value:
1873 * SCSI bus speed in units of 100KHz, e.g. 1600 is 160 MHz.
1874 * For a 2-byte (16-bit) wide SCSI bus, the maximum transfer rate
1875 * in MB/sec is twice the bus speed in MHz (e.g. a wide-enabled
1876 * bus at 160 MHz transfers at most 320 MB/sec).
1877 **/
1878static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
1879{
1880 struct ipr_resource_entry *res;
1881 const struct ipr_ses_table_entry *ste;
1882 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
1883
1884 /* Loop through each config table entry in the config table buffer */
1885 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1886 if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
1887 continue;
1888
1889 if (bus != res->cfgte.res_addr.bus)
1890 continue;
1891
1892 if (!(ste = ipr_find_ses_entry(res)))
1893 continue;
1894
1895 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
1896 }
1897
1898 return max_xfer_rate;
1899}
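
/*
 * Worked example (illustrative, assuming max_bus_speed_limit is
 * expressed in MB/sec): a matching SES table entry limiting a 16-bit
 * wide bus to 320 MB/sec yields (320 * 10) / (16 / 8) = 1600, i.e.
 * 160 MHz in the 100KHz units documented above.
 */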
1900
1901/**
1902 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
1903 * @ioa_cfg: ioa config struct
1904 * @max_delay: max delay in micro-seconds to wait
1905 *
1906 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
1907 *
1908 * Return value:
1909 * 0 on success / other on failure
1910 **/
1911static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
1912{
1913 volatile u32 pcii_reg;
1914 int delay = 1;
1915
1916 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
1917 while (delay < max_delay) {
1918 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
1919
1920 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
1921 return 0;
1922
1923 /* udelay cannot be used if delay is more than a few milliseconds */
1924 if ((delay / 1000) > MAX_UDELAY_MS)
1925 mdelay(delay / 1000);
1926 else
1927 udelay(delay);
1928
1929 delay += delay;
1930 }
1931 return -EIO;
1932}
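
/*
 * Note on the loop above: the delay doubles on each pass
 * (1, 2, 4, ... microseconds), so the total time spent busy
 * waiting is bounded by roughly 2 * max_delay before -EIO is
 * returned.
 */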
1933
1934/**
1935 * ipr_get_ldump_data_section - Dump IOA memory
1936 * @ioa_cfg: ioa config struct
1937 * @start_addr: adapter address to dump
1938 * @dest: destination kernel buffer
1939 * @length_in_words: length to dump in 4 byte words
1940 *
1941 * Return value:
1942 * 0 on success / -EIO on failure
1943 **/
1944static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1945 u32 start_addr,
1946 __be32 *dest, u32 length_in_words)
1947{
1948 volatile u32 temp_pcii_reg;
1949 int i, delay = 0;
1950
1951 /* Write IOA interrupt reg starting LDUMP state */
1952 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
1953 ioa_cfg->regs.set_uproc_interrupt_reg);
1954
1955 /* Wait for IO debug acknowledge */
1956 if (ipr_wait_iodbg_ack(ioa_cfg,
1957 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
1958 dev_err(&ioa_cfg->pdev->dev,
1959 "IOA dump long data transfer timeout\n");
1960 return -EIO;
1961 }
1962
1963 /* Signal LDUMP interlocked - clear IO debug ack */
1964 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1965 ioa_cfg->regs.clr_interrupt_reg);
1966
1967 /* Write Mailbox with starting address */
1968 writel(start_addr, ioa_cfg->ioa_mailbox);
1969
1970 /* Signal address valid - clear IOA Reset alert */
1971 writel(IPR_UPROCI_RESET_ALERT,
1972 ioa_cfg->regs.clr_uproc_interrupt_reg);
1973
1974 for (i = 0; i < length_in_words; i++) {
1975 /* Wait for IO debug acknowledge */
1976 if (ipr_wait_iodbg_ack(ioa_cfg,
1977 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
1978 dev_err(&ioa_cfg->pdev->dev,
1979 "IOA dump short data transfer timeout\n");
1980 return -EIO;
1981 }
1982
1983 /* Read data from mailbox and increment destination pointer */
1984 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
1985 dest++;
1986
1987 /* For all but the last word of data, signal data received */
1988 if (i < (length_in_words - 1)) {
1989 /* Signal dump data received - Clear IO debug Ack */
1990 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1991 ioa_cfg->regs.clr_interrupt_reg);
1992 }
1993 }
1994
1995 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
1996 writel(IPR_UPROCI_RESET_ALERT,
1997 ioa_cfg->regs.set_uproc_interrupt_reg);
1998
1999 writel(IPR_UPROCI_IO_DEBUG_ALERT,
2000 ioa_cfg->regs.clr_uproc_interrupt_reg);
2001
2002 /* Signal dump data received - Clear IO debug Ack */
2003 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2004 ioa_cfg->regs.clr_interrupt_reg);
2005
2006 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2007 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2008 temp_pcii_reg =
2009 readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
2010
2011 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2012 return 0;
2013
2014 udelay(10);
2015 delay += 10;
2016 }
2017
2018 return 0;
2019}
2020
2021#ifdef CONFIG_SCSI_IPR_DUMP
2022/**
2023 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2024 * @ioa_cfg: ioa config struct
2025 * @pci_address: adapter address
2026 * @length: length of data to copy
2027 *
2028 * Copy data from PCI adapter to kernel buffer.
2029 * Note: length MUST be a 4 byte multiple
2030 * Return value:
2031 * 0 on success / other on failure
2032 **/
2033static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2034 unsigned long pci_address, u32 length)
2035{
2036 int bytes_copied = 0;
2037 int cur_len, rc, rem_len, rem_page_len;
2038 __be32 *page;
2039 unsigned long lock_flags = 0;
2040 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2041
2042 while (bytes_copied < length &&
2043 (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
2044 if (ioa_dump->page_offset >= PAGE_SIZE ||
2045 ioa_dump->page_offset == 0) {
2046 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2047
2048 if (!page) {
2049 ipr_trace;
2050 return bytes_copied;
2051 }
2052
2053 ioa_dump->page_offset = 0;
2054 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2055 ioa_dump->next_page_index++;
2056 } else
2057 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2058
2059 rem_len = length - bytes_copied;
2060 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2061 cur_len = min(rem_len, rem_page_len);
2062
2063 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2064 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2065 rc = -EIO;
2066 } else {
2067 rc = ipr_get_ldump_data_section(ioa_cfg,
2068 pci_address + bytes_copied,
2069 &page[ioa_dump->page_offset / 4],
2070 (cur_len / sizeof(u32)));
2071 }
2072 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2073
2074 if (!rc) {
2075 ioa_dump->page_offset += cur_len;
2076 bytes_copied += cur_len;
2077 } else {
2078 ipr_trace;
2079 break;
2080 }
2081 schedule();
2082 }
2083
2084 return bytes_copied;
2085}
2086
2087/**
2088 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2089 * @hdr: dump entry header struct
2090 *
2091 * Return value:
2092 * nothing
2093 **/
2094static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2095{
2096 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2097 hdr->num_elems = 1;
2098 hdr->offset = sizeof(*hdr);
2099 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2100}
2101
2102/**
2103 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2104 * @ioa_cfg: ioa config struct
2105 * @driver_dump: driver dump struct
2106 *
2107 * Return value:
2108 * nothing
2109 **/
2110static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2111 struct ipr_driver_dump *driver_dump)
2112{
2113 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2114
2115 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2116 driver_dump->ioa_type_entry.hdr.len =
2117 sizeof(struct ipr_dump_ioa_type_entry) -
2118 sizeof(struct ipr_dump_entry_header);
2119 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2120 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2121 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2122 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2123 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2124 ucode_vpd->minor_release[1];
2125 driver_dump->hdr.num_entries++;
2126}
2127
2128/**
2129 * ipr_dump_version_data - Fill in the driver version in the dump.
2130 * @ioa_cfg: ioa config struct
2131 * @driver_dump: driver dump struct
2132 *
2133 * Return value:
2134 * nothing
2135 **/
2136static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2137 struct ipr_driver_dump *driver_dump)
2138{
2139 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2140 driver_dump->version_entry.hdr.len =
2141 sizeof(struct ipr_dump_version_entry) -
2142 sizeof(struct ipr_dump_entry_header);
2143 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2144 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2145 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2146 driver_dump->hdr.num_entries++;
2147}
2148
2149/**
2150 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2151 * @ioa_cfg: ioa config struct
2152 * @driver_dump: driver dump struct
2153 *
2154 * Return value:
2155 * nothing
2156 **/
2157static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2158 struct ipr_driver_dump *driver_dump)
2159{
2160 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2161 driver_dump->trace_entry.hdr.len =
2162 sizeof(struct ipr_dump_trace_entry) -
2163 sizeof(struct ipr_dump_entry_header);
2164 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2165 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2166 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2167 driver_dump->hdr.num_entries++;
2168}
2169
2170/**
2171 * ipr_dump_location_data - Fill in the IOA location in the dump.
2172 * @ioa_cfg: ioa config struct
2173 * @driver_dump: driver dump struct
2174 *
2175 * Return value:
2176 * nothing
2177 **/
2178static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2179 struct ipr_driver_dump *driver_dump)
2180{
2181 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2182 driver_dump->location_entry.hdr.len =
2183 sizeof(struct ipr_dump_location_entry) -
2184 sizeof(struct ipr_dump_entry_header);
2185 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2186 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
2187 strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
2188 driver_dump->hdr.num_entries++;
2189}
2190
2191/**
2192 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2193 * @ioa_cfg: ioa config struct
2194 * @dump: dump struct
2195 *
2196 * Return value:
2197 * nothing
2198 **/
2199static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2200{
2201 unsigned long start_addr, sdt_word;
2202 unsigned long lock_flags = 0;
2203 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2204 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2205 u32 num_entries, start_off, end_off;
2206 u32 bytes_to_copy, bytes_copied, rc;
2207 struct ipr_sdt *sdt;
2208 int i;
2209
2210 ENTER;
2211
2212 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2213
2214 if (ioa_cfg->sdt_state != GET_DUMP) {
2215 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2216 return;
2217 }
2218
2219 start_addr = readl(ioa_cfg->ioa_mailbox);
2220
2221 if (!ipr_sdt_is_fmt2(start_addr)) {
2222 dev_err(&ioa_cfg->pdev->dev,
2223 "Invalid dump table format: %lx\n", start_addr);
2224 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2225 return;
2226 }
2227
2228 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2229
2230 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2231
2232 /* Initialize the overall dump header */
2233 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
2234 driver_dump->hdr.num_entries = 1;
2235 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
2236 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
2237 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
2238 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
2239
2240 ipr_dump_version_data(ioa_cfg, driver_dump);
2241 ipr_dump_location_data(ioa_cfg, driver_dump);
2242 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
2243 ipr_dump_trace_data(ioa_cfg, driver_dump);
2244
2245 /* Update dump_header */
2246 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
2247
2248 /* IOA Dump entry */
2249 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
2250 ioa_dump->format = IPR_SDT_FMT2;
2251 ioa_dump->hdr.len = 0;
2252 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2253 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
2254
2255 /* The first entries in the sdt are actually a list of dump addresses and
2256 lengths to gather the real dump data. sdt points to the
2257 IOA-generated dump table. Dump data will be extracted based
2258 on entries in this table. */
2259 sdt = &ioa_dump->sdt;
2260
2261 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
2262 sizeof(struct ipr_sdt) / sizeof(__be32));
2263
2264 /* Smart Dump table is ready to use and the first entry is valid */
2265 if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
2266 dev_err(&ioa_cfg->pdev->dev,
2267 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
2268 rc, be32_to_cpu(sdt->hdr.state));
2269 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
2270 ioa_cfg->sdt_state = DUMP_OBTAINED;
2271 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2272 return;
2273 }
2274
2275 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
2276
2277 if (num_entries > IPR_NUM_SDT_ENTRIES)
2278 num_entries = IPR_NUM_SDT_ENTRIES;
2279
2280 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2281
2282 for (i = 0; i < num_entries; i++) {
2283 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
2284 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2285 break;
2286 }
2287
2288 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
2289 sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
2290 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
2291 end_off = be32_to_cpu(sdt->entry[i].end_offset);
2292
2293 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
2294 bytes_to_copy = end_off - start_off;
2295 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
2296 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
2297 continue;
2298 }
2299
2300 /* Copy data from adapter to driver buffers */
2301 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2302 bytes_to_copy);
2303
2304 ioa_dump->hdr.len += bytes_copied;
2305
2306 if (bytes_copied != bytes_to_copy) {
2307 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2308 break;
2309 }
2310 }
2311 }
2312 }
2313
2314 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
2315
2316 /* Update dump_header */
2317 driver_dump->hdr.len += ioa_dump->hdr.len;
2318 wmb();
2319 ioa_cfg->sdt_state = DUMP_OBTAINED;
2320 LEAVE;
2321}
2322
2323#else
2324#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
2325#endif
2326
2327/**
2328 * ipr_release_dump - Free adapter dump memory
2329 * @kref: kref struct
2330 *
2331 * Return value:
2332 * nothing
2333 **/
2334static void ipr_release_dump(struct kref *kref)
2335{
2336 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
2337 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
2338 unsigned long lock_flags = 0;
2339 int i;
2340
2341 ENTER;
2342 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2343 ioa_cfg->dump = NULL;
2344 ioa_cfg->sdt_state = INACTIVE;
2345 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2346
2347 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
2348 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
2349
2350 kfree(dump);
2351 LEAVE;
2352}
2353
2354/**
2355 * ipr_worker_thread - Worker thread
c4028958 2356 * @work: work struct embedded in the ioa config struct
2357 *
2358 * Called at task level from a work thread. This function takes care
2359 * of adding and removing devices from the mid-layer as configuration
2360 * changes are detected by the adapter.
2361 *
2362 * Return value:
2363 * nothing
2364 **/
c4028958 2365static void ipr_worker_thread(struct work_struct *work)
2366{
2367 unsigned long lock_flags;
2368 struct ipr_resource_entry *res;
2369 struct scsi_device *sdev;
2370 struct ipr_dump *dump;
2371 struct ipr_ioa_cfg *ioa_cfg =
2372 container_of(work, struct ipr_ioa_cfg, work_q);
2373 u8 bus, target, lun;
2374 int did_work;
2375
2376 ENTER;
2377 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2378
2379 if (ioa_cfg->sdt_state == GET_DUMP) {
2380 dump = ioa_cfg->dump;
2381 if (!dump) {
2382 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2383 return;
2384 }
2385 kref_get(&dump->kref);
2386 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2387 ipr_get_ioa_dump(ioa_cfg, dump);
2388 kref_put(&dump->kref, ipr_release_dump);
2389
2390 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2391 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
2392 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2393 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2394 return;
2395 }
2396
2397restart:
2398 do {
2399 did_work = 0;
2400 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
2401 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2402 return;
2403 }
2404
2405 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2406 if (res->del_from_ml && res->sdev) {
2407 did_work = 1;
2408 sdev = res->sdev;
2409 if (!scsi_device_get(sdev)) {
2410 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
2411 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2412 scsi_remove_device(sdev);
2413 scsi_device_put(sdev);
2414 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2415 }
2416 break;
2417 }
2418 }
2419 } while (did_work);
2420
2421 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2422 if (res->add_to_ml) {
2423 bus = res->cfgte.res_addr.bus;
2424 target = res->cfgte.res_addr.target;
2425 lun = res->cfgte.res_addr.lun;
1121b794 2426 res->add_to_ml = 0;
2427 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2428 scsi_add_device(ioa_cfg->host, bus, target, lun);
2429 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2430 goto restart;
2431 }
2432 }
2433
2434 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ee959b00 2435 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
2436 LEAVE;
2437}
2438
2439#ifdef CONFIG_SCSI_IPR_TRACE
2440/**
2441 * ipr_read_trace - Dump the adapter trace
2442 * @kobj: kobject struct
91a69029 2443 * @bin_attr: bin_attribute struct
2444 * @buf: buffer
2445 * @off: offset
2446 * @count: buffer size
2447 *
2448 * Return value:
2449 * number of bytes printed to buffer
2450 **/
2451static ssize_t ipr_read_trace(struct kobject *kobj,
2452 struct bin_attribute *bin_attr,
2453 char *buf, loff_t off, size_t count)
1da177e4 2454{
2455 struct device *dev = container_of(kobj, struct device, kobj);
2456 struct Scsi_Host *shost = class_to_shost(dev);
2457 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2458 unsigned long lock_flags = 0;
2459 int size = IPR_TRACE_SIZE;
2460 char *src = (char *)ioa_cfg->trace;
2461
2462 if (off > size)
2463 return 0;
2464 if (off + count > size) {
2465 size -= off;
2466 count = size;
2467 }
2468
2469 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2470 memcpy(buf, &src[off], count);
2471 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2472 return count;
2473}
2474
2475static struct bin_attribute ipr_trace_attr = {
2476 .attr = {
2477 .name = "trace",
2478 .mode = S_IRUGO,
2479 },
2480 .size = 0,
2481 .read = ipr_read_trace,
2482};
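
/*
 * Illustrative usage, assuming the binary attribute is registered
 * under the Scsi_Host class device (the host number is
 * system-dependent):
 *   # hexdump -C /sys/class/scsi_host/host0/trace | head
 */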
2483#endif
2484
62275040 2485static const struct {
2486 enum ipr_cache_state state;
2487 char *name;
2488} cache_state [] = {
2489 { CACHE_NONE, "none" },
2490 { CACHE_DISABLED, "disabled" },
2491 { CACHE_ENABLED, "enabled" }
2492};
2493
2494/**
2495 * ipr_show_write_caching - Show the write caching attribute
2496 * @dev: device struct
2497 * @buf: buffer
62275040 2498 *
2499 * Return value:
2500 * number of bytes printed to buffer
2501 **/
2502static ssize_t ipr_show_write_caching(struct device *dev,
2503 struct device_attribute *attr, char *buf)
62275040 2504{
ee959b00 2505 struct Scsi_Host *shost = class_to_shost(dev);
62275040 2506 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2507 unsigned long lock_flags = 0;
2508 int i, len = 0;
2509
2510 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2511 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2512 if (cache_state[i].state == ioa_cfg->cache_state) {
2513 len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
2514 break;
2515 }
2516 }
2517 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2518 return len;
2519}
2520
2521
2522/**
2523 * ipr_store_write_caching - Enable/disable adapter write cache
2524 * @dev: device struct
2525 * @buf: buffer
2526 * @count: buffer size
62275040 2527 *
2528 * This function will enable/disable adapter write cache.
2529 *
2530 * Return value:
2531 * count on success / other on failure
2532 **/
2533static ssize_t ipr_store_write_caching(struct device *dev,
2534 struct device_attribute *attr,
2535 const char *buf, size_t count)
62275040 2536{
ee959b00 2537 struct Scsi_Host *shost = class_to_shost(dev);
62275040 2538 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2539 unsigned long lock_flags = 0;
2540 enum ipr_cache_state new_state = CACHE_INVALID;
2541 int i;
2542
2543 if (!capable(CAP_SYS_ADMIN))
2544 return -EACCES;
2545 if (ioa_cfg->cache_state == CACHE_NONE)
2546 return -EINVAL;
2547
2548 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2549 if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2550 new_state = cache_state[i].state;
2551 break;
2552 }
2553 }
2554
2555 if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2556 return -EINVAL;
2557
2558 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2559 if (ioa_cfg->cache_state == new_state) {
2560 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2561 return count;
2562 }
2563
2564 ioa_cfg->cache_state = new_state;
2565 dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2566 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2567 if (!ioa_cfg->in_reset_reload)
2568 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2569 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2570 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2571
2572 return count;
2573}
2574
ee959b00 2575static struct device_attribute ipr_ioa_cache_attr = {
62275040 2576 .attr = {
2577 .name = "write_cache",
2578 .mode = S_IRUGO | S_IWUSR,
2579 },
2580 .show = ipr_show_write_caching,
2581 .store = ipr_store_write_caching
2582};
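
/*
 * Illustrative usage, assuming the attribute is registered under the
 * Scsi_Host class device (host0 is an example):
 *   # cat /sys/class/scsi_host/host0/write_cache
 *   enabled
 *   # echo disabled > /sys/class/scsi_host/host0/write_cache
 * The store path above applies the new cache state by initiating a
 * normal adapter shutdown/reset and waiting for it to complete.
 */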
2583
2584/**
2585 * ipr_show_fw_version - Show the firmware version
2586 * @dev: class device struct
2587 * @buf: buffer
2588 *
2589 * Return value:
2590 * number of bytes printed to buffer
2591 **/
2592static ssize_t ipr_show_fw_version(struct device *dev,
2593 struct device_attribute *attr, char *buf)
1da177e4 2594{
ee959b00 2595 struct Scsi_Host *shost = class_to_shost(dev);
2596 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2597 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2598 unsigned long lock_flags = 0;
2599 int len;
2600
2601 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2602 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2603 ucode_vpd->major_release, ucode_vpd->card_type,
2604 ucode_vpd->minor_release[0],
2605 ucode_vpd->minor_release[1]);
2606 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2607 return len;
2608}
2609
ee959b00 2610static struct device_attribute ipr_fw_version_attr = {
2611 .attr = {
2612 .name = "fw_version",
2613 .mode = S_IRUGO,
2614 },
2615 .show = ipr_show_fw_version,
2616};
2617
2618/**
2619 * ipr_show_log_level - Show the adapter's error logging level
2620 * @dev: class device struct
2621 * @buf: buffer
2622 *
2623 * Return value:
2624 * number of bytes printed to buffer
2625 **/
2626static ssize_t ipr_show_log_level(struct device *dev,
2627 struct device_attribute *attr, char *buf)
1da177e4 2628{
ee959b00 2629 struct Scsi_Host *shost = class_to_shost(dev);
2630 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2631 unsigned long lock_flags = 0;
2632 int len;
2633
2634 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2635 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2636 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2637 return len;
2638}
2639
2640/**
2641 * ipr_store_log_level - Change the adapter's error logging level
2642 * @dev: class device struct
2643 * @buf: buffer
2644 *
2645 * Return value:
2646 * number of bytes consumed from buffer
2647 **/
2648static ssize_t ipr_store_log_level(struct device *dev,
2649 struct device_attribute *attr,
2650 const char *buf, size_t count)
2651{
ee959b00 2652 struct Scsi_Host *shost = class_to_shost(dev);
2653 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2654 unsigned long lock_flags = 0;
2655
2656 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2657 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2658 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2659 return strlen(buf);
2660}
2661
ee959b00 2662static struct device_attribute ipr_log_level_attr = {
2663 .attr = {
2664 .name = "log_level",
2665 .mode = S_IRUGO | S_IWUSR,
2666 },
2667 .show = ipr_show_log_level,
2668 .store = ipr_store_log_level
2669};
2670
2671/**
2672 * ipr_store_diagnostics - IOA Diagnostics interface
2673 * @dev: device struct
2674 * @buf: buffer
2675 * @count: buffer size
2676 *
2677 * This function will reset the adapter and wait a reasonable
2678 * amount of time for any errors that the adapter might log.
2679 *
2680 * Return value:
2681 * count on success / other on failure
2682 **/
2683static ssize_t ipr_store_diagnostics(struct device *dev,
2684 struct device_attribute *attr,
2685 const char *buf, size_t count)
2686{
ee959b00 2687 struct Scsi_Host *shost = class_to_shost(dev);
2688 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2689 unsigned long lock_flags = 0;
2690 int rc = count;
2691
2692 if (!capable(CAP_SYS_ADMIN))
2693 return -EACCES;
2694
1da177e4 2695 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2696 while (ioa_cfg->in_reset_reload) {
2697 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2698 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2699 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2700 }
2701
2702 ioa_cfg->errors_logged = 0;
2703 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2704
2705 if (ioa_cfg->in_reset_reload) {
2706 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2707 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2708
2709 /* Wait for a second for any errors to be logged */
2710 msleep(1000);
2711 } else {
2712 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2713 return -EIO;
2714 }
2715
2716 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2717 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2718 rc = -EIO;
2719 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2720
2721 return rc;
2722}
2723
ee959b00 2724static struct device_attribute ipr_diagnostics_attr = {
2725 .attr = {
2726 .name = "run_diagnostics",
2727 .mode = S_IWUSR,
2728 },
2729 .store = ipr_store_diagnostics
2730};
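
/*
 * Illustrative usage (host0 is an example): any write starts the
 * diagnostic reset; a failing exit status means the adapter logged
 * errors during the reset, per the errors_logged check above.
 *   # echo 1 > /sys/class/scsi_host/host0/run_diagnostics
 */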
2731
f37eb54b 2732/**
2733 * ipr_show_adapter_state - Show the adapter's state
2734 * @dev: device struct
2735 * @buf: buffer
f37eb54b 2736 *
2737 * Return value:
2738 * number of bytes printed to buffer
2739 **/
2740static ssize_t ipr_show_adapter_state(struct device *dev,
2741 struct device_attribute *attr, char *buf)
f37eb54b 2742{
ee959b00 2743 struct Scsi_Host *shost = class_to_shost(dev);
f37eb54b 2744 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2745 unsigned long lock_flags = 0;
2746 int len;
2747
2748 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2749 if (ioa_cfg->ioa_is_dead)
2750 len = snprintf(buf, PAGE_SIZE, "offline\n");
2751 else
2752 len = snprintf(buf, PAGE_SIZE, "online\n");
2753 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2754 return len;
2755}
2756
2757/**
2758 * ipr_store_adapter_state - Change adapter state
2759 * @dev: device struct
2760 * @buf: buffer
2761 * @count: buffer size
f37eb54b 2762 *
2763 * This function will change the adapter's state.
2764 *
2765 * Return value:
2766 * count on success / other on failure
2767 **/
2768static ssize_t ipr_store_adapter_state(struct device *dev,
2769 struct device_attribute *attr,
f37eb54b 2770 const char *buf, size_t count)
2771{
ee959b00 2772 struct Scsi_Host *shost = class_to_shost(dev);
f37eb54b 2773 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2774 unsigned long lock_flags;
2775 int result = count;
2776
2777 if (!capable(CAP_SYS_ADMIN))
2778 return -EACCES;
2779
2780 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2781 if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
2782 ioa_cfg->ioa_is_dead = 0;
2783 ioa_cfg->reset_retries = 0;
2784 ioa_cfg->in_ioa_bringdown = 0;
2785 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2786 }
2787 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2788 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2789
2790 return result;
2791}
2792
ee959b00 2793static struct device_attribute ipr_ioa_state_attr = {
f37eb54b 2794 .attr = {
49dd0961 2795 .name = "online_state",
f37eb54b 2796 .mode = S_IRUGO | S_IWUSR,
2797 },
2798 .show = ipr_show_adapter_state,
2799 .store = ipr_store_adapter_state
2800};
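
/*
 * Illustrative usage (host0 is an example): a dead adapter can be
 * brought back online, which kicks off a full adapter reset:
 *   # cat /sys/class/scsi_host/host0/online_state
 *   offline
 *   # echo online > /sys/class/scsi_host/host0/online_state
 */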
2801
2802/**
2803 * ipr_store_reset_adapter - Reset the adapter
2804 * @dev: device struct
2805 * @buf: buffer
2806 * @count: buffer size
2807 *
2808 * This function will reset the adapter.
2809 *
2810 * Return value:
2811 * count on success / other on failure
2812 **/
2813static ssize_t ipr_store_reset_adapter(struct device *dev,
2814 struct device_attribute *attr,
2815 const char *buf, size_t count)
2816{
ee959b00 2817 struct Scsi_Host *shost = class_to_shost(dev);
2818 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2819 unsigned long lock_flags;
2820 int result = count;
2821
2822 if (!capable(CAP_SYS_ADMIN))
2823 return -EACCES;
2824
2825 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2826 if (!ioa_cfg->in_reset_reload)
2827 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2828 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2829 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2830
2831 return result;
2832}
2833
ee959b00 2834static struct device_attribute ipr_ioa_reset_attr = {
2835 .attr = {
2836 .name = "reset_host",
2837 .mode = S_IWUSR,
2838 },
2839 .store = ipr_store_reset_adapter
2840};
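
/*
 * Illustrative usage (host0 is an example): the value written is
 * ignored; any write requests a normal adapter reset:
 *   # echo 1 > /sys/class/scsi_host/host0/reset_host
 */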
2841
2842/**
2843 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2844 * @buf_len: buffer length
2845 *
2846 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2847 * list to use for microcode download
2848 *
2849 * Return value:
2850 * pointer to sglist / NULL on failure
2851 **/
2852static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2853{
2854 int sg_size, order, bsize_elem, num_elem, i, j;
2855 struct ipr_sglist *sglist;
2856 struct scatterlist *scatterlist;
2857 struct page *page;
2858
2859 /* Get the minimum size per scatter/gather element */
2860 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2861
2862 /* Get the actual size per element */
2863 order = get_order(sg_size);
2864
2865 /* Determine the actual number of bytes per element */
2866 bsize_elem = PAGE_SIZE * (1 << order);
2867
2868 /* Determine the actual number of sg entries needed */
2869 if (buf_len % bsize_elem)
2870 num_elem = (buf_len / bsize_elem) + 1;
2871 else
2872 num_elem = buf_len / bsize_elem;
2873
2874 /* Allocate a scatter/gather list for the DMA */
0bc42e35 2875 sglist = kzalloc(sizeof(struct ipr_sglist) +
2876 (sizeof(struct scatterlist) * (num_elem - 1)),
2877 GFP_KERNEL);
2878
2879 if (sglist == NULL) {
2880 ipr_trace;
2881 return NULL;
2882 }
2883
1da177e4 2884 scatterlist = sglist->scatterlist;
45711f1a 2885 sg_init_table(scatterlist, num_elem);
2886
2887 sglist->order = order;
2888 sglist->num_sg = num_elem;
2889
2890 /* Allocate a bunch of sg elements */
2891 for (i = 0; i < num_elem; i++) {
2892 page = alloc_pages(GFP_KERNEL, order);
2893 if (!page) {
2894 ipr_trace;
2895
2896 /* Free up what we already allocated */
2897 for (j = i - 1; j >= 0; j--)
45711f1a 2898 __free_pages(sg_page(&scatterlist[j]), order);
2899 kfree(sglist);
2900 return NULL;
2901 }
2902
642f1490 2903 sg_set_page(&scatterlist[i], page, 0, 0);
2904 }
2905
2906 return sglist;
2907}
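
/*
 * Sizing example (illustrative, assuming 4K pages and an IPR_MAX_SGLIST
 * of 64): for a 1MB microcode image, sg_size = 1048576 / 63 = 16644
 * bytes, get_order() rounds that up to order 3 (32K chunks), so the
 * image is carved into num_elem = 1048576 / 32768 = 32 elements.
 */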
2908
2909/**
2910 * ipr_free_ucode_buffer - Frees a microcode download buffer
2911 * @p_dnld: scatter/gather list pointer
2912 *
2913 * Free a DMA'able ucode download buffer previously allocated with
2914 * ipr_alloc_ucode_buffer
2915 *
2916 * Return value:
2917 * nothing
2918 **/
2919static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2920{
2921 int i;
2922
2923 for (i = 0; i < sglist->num_sg; i++)
45711f1a 2924 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
2925
2926 kfree(sglist);
2927}
2928
2929/**
2930 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2931 * @sglist: scatter/gather list pointer
2932 * @buffer: buffer pointer
2933 * @len: buffer length
2934 *
2935 * Copy a microcode image from a user buffer into a buffer allocated by
2936 * ipr_alloc_ucode_buffer
2937 *
2938 * Return value:
2939 * 0 on success / other on failure
2940 **/
2941static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2942 u8 *buffer, u32 len)
2943{
2944 int bsize_elem, i, result = 0;
2945 struct scatterlist *scatterlist;
2946 void *kaddr;
2947
2948 /* Determine the actual number of bytes per element */
2949 bsize_elem = PAGE_SIZE * (1 << sglist->order);
2950
2951 scatterlist = sglist->scatterlist;
2952
2953 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2954 struct page *page = sg_page(&scatterlist[i]);
2955
2956 kaddr = kmap(page);
1da177e4 2957 memcpy(kaddr, buffer, bsize_elem);
45711f1a 2958 kunmap(page);
2959
2960 scatterlist[i].length = bsize_elem;
2961
2962 if (result != 0) {
2963 ipr_trace;
2964 return result;
2965 }
2966 }
2967
2968 if (len % bsize_elem) {
2969 struct page *page = sg_page(&scatterlist[i]);
2970
2971 kaddr = kmap(page);
1da177e4 2972 memcpy(kaddr, buffer, len % bsize_elem);
45711f1a 2973 kunmap(page);
2974
2975 scatterlist[i].length = len % bsize_elem;
2976 }
2977
2978 sglist->buffer_len = len;
2979 return result;
2980}
2981
2982/**
12baa420 2983 * ipr_build_ucode_ioadl - Build a microcode download IOADL
2984 * @ipr_cmd: ipr command struct
2985 * @sglist: scatter/gather list
1da177e4 2986 *
12baa420 2987 * Builds a microcode download IOA data list (IOADL).
1da177e4 2988 *
1da177e4 2989 **/
12baa420 2990static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
2991 struct ipr_sglist *sglist)
1da177e4 2992{
2993 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2994 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2995 struct scatterlist *scatterlist = sglist->scatterlist;
2996 int i;
2997
12baa420 2998 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
1da177e4 2999 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
12baa420 3000 ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
3001 ioarcb->write_ioadl_len =
3002 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3003
3004 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3005 ioadl[i].flags_and_data_len =
3006 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3007 ioadl[i].address =
3008 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3009 }
3010
12baa420 3011 ioadl[i-1].flags_and_data_len |=
3012 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3013}
3014
3015/**
3016 * ipr_update_ioa_ucode - Update IOA's microcode
3017 * @ioa_cfg: ioa config struct
3018 * @sglist: scatter/gather list
3019 *
3020 * Initiate an adapter reset to update the IOA's microcode
3021 *
3022 * Return value:
3023 * 0 on success / -EIO on failure
3024 **/
3025static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3026 struct ipr_sglist *sglist)
3027{
3028 unsigned long lock_flags;
3029
3030 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3031 while (ioa_cfg->in_reset_reload) {
3032 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3033 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3034 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3035 }
12baa420 3036
3037 if (ioa_cfg->ucode_sglist) {
3038 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3039 dev_err(&ioa_cfg->pdev->dev,
3040 "Microcode download already in progress\n");
3041 return -EIO;
1da177e4 3042 }
12baa420 3043
3044 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3045 sglist->num_sg, DMA_TO_DEVICE);
3046
3047 if (!sglist->num_dma_sg) {
3048 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3049 dev_err(&ioa_cfg->pdev->dev,
3050 "Failed to map microcode download buffer!\n");
3051 return -EIO;
3052 }
3053
12baa420 3054 ioa_cfg->ucode_sglist = sglist;
3055 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3056 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3057 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3058
3059 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3060 ioa_cfg->ucode_sglist = NULL;
3061 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3062 return 0;
3063}
3064
3065/**
3066 * ipr_store_update_fw - Update the firmware on the adapter
3067 * @dev: device struct
3068 * @buf: buffer
3069 * @count: buffer size
3070 *
3071 * This function will update the firmware on the adapter.
3072 *
3073 * Return value:
3074 * count on success / other on failure
3075 **/
3076static ssize_t ipr_store_update_fw(struct device *dev,
3077 struct device_attribute *attr,
3078 const char *buf, size_t count)
1da177e4 3079{
ee959b00 3080 struct Scsi_Host *shost = class_to_shost(dev);
3081 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3082 struct ipr_ucode_image_header *image_hdr;
3083 const struct firmware *fw_entry;
3084 struct ipr_sglist *sglist;
3085 char fname[100];
3086 char *src;
3087 int len, result, dnld_size;
3088
3089 if (!capable(CAP_SYS_ADMIN))
3090 return -EACCES;
3091
3092 len = strlcpy(fname, buf, sizeof(fname));
3093 if (len && len < sizeof(fname) && fname[len - 1] == '\n')
 fname[len - 1] = '\0'; /* strip the trailing newline from a sysfs write */
3094
3095 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3096 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3097 return -EIO;
3098 }
3099
3100 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3101
3102 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
3103 (ioa_cfg->vpd_cbs->page3_data.card_type &&
3104 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
3105 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
3106 release_firmware(fw_entry);
3107 return -EINVAL;
3108 }
3109
3110 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3111 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3112 sglist = ipr_alloc_ucode_buffer(dnld_size);
3113
3114 if (!sglist) {
3115 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3116 release_firmware(fw_entry);
3117 return -ENOMEM;
3118 }
3119
3120 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3121
3122 if (result) {
3123 dev_err(&ioa_cfg->pdev->dev,
3124 "Microcode buffer copy to DMA buffer failed\n");
12baa420 3125 goto out;
3126 }
3127
12baa420 3128 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
1da177e4 3129
12baa420 3130 if (!result)
3131 result = count;
3132out:
3133 ipr_free_ucode_buffer(sglist);
3134 release_firmware(fw_entry);
12baa420 3135 return result;
3136}
3137
ee959b00 3138static struct device_attribute ipr_update_fw_attr = {
3139 .attr = {
3140 .name = "update_fw",
3141 .mode = S_IWUSR,
3142 },
3143 .store = ipr_store_update_fw
3144};
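
/*
 * Illustrative usage (host0 and the file name are examples): the image
 * must live where request_firmware() can find it, typically
 * /lib/firmware:
 *   # cp my-ioa-ucode.bin /lib/firmware/
 *   # echo my-ioa-ucode.bin > /sys/class/scsi_host/host0/update_fw
 */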
3145
ee959b00 3146static struct device_attribute *ipr_ioa_attrs[] = {
3147 &ipr_fw_version_attr,
3148 &ipr_log_level_attr,
3149 &ipr_diagnostics_attr,
f37eb54b 3150 &ipr_ioa_state_attr,
3151 &ipr_ioa_reset_attr,
3152 &ipr_update_fw_attr,
62275040 3153 &ipr_ioa_cache_attr,
3154 NULL,
3155};
3156
3157#ifdef CONFIG_SCSI_IPR_DUMP
3158/**
3159 * ipr_read_dump - Dump the adapter
3160 * @kobj: kobject struct
91a69029 3161 * @bin_attr: bin_attribute struct
3162 * @buf: buffer
3163 * @off: offset
3164 * @count: buffer size
3165 *
3166 * Return value:
3167 * number of bytes printed to buffer
3168 **/
3169static ssize_t ipr_read_dump(struct kobject *kobj,
3170 struct bin_attribute *bin_attr,
3171 char *buf, loff_t off, size_t count)
1da177e4 3172{
ee959b00 3173 struct device *cdev = container_of(kobj, struct device, kobj);
3174 struct Scsi_Host *shost = class_to_shost(cdev);
3175 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3176 struct ipr_dump *dump;
3177 unsigned long lock_flags = 0;
3178 char *src;
3179 int len;
3180 size_t rc = count;
3181
3182 if (!capable(CAP_SYS_ADMIN))
3183 return -EACCES;
3184
3185 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3186 dump = ioa_cfg->dump;
3187
3188 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3189 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3190 return 0;
3191 }
3192 kref_get(&dump->kref);
3193 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3194
3195 if (off > dump->driver_dump.hdr.len) {
3196 kref_put(&dump->kref, ipr_release_dump);
3197 return 0;
3198 }
3199
3200 if (off + count > dump->driver_dump.hdr.len) {
3201 count = dump->driver_dump.hdr.len - off;
3202 rc = count;
3203 }
3204
3205 if (count && off < sizeof(dump->driver_dump)) {
3206 if (off + count > sizeof(dump->driver_dump))
3207 len = sizeof(dump->driver_dump) - off;
3208 else
3209 len = count;
3210 src = (u8 *)&dump->driver_dump + off;
3211 memcpy(buf, src, len);
3212 buf += len;
3213 off += len;
3214 count -= len;
3215 }
3216
3217 off -= sizeof(dump->driver_dump);
3218
3219 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
3220 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
3221 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
3222 else
3223 len = count;
3224 src = (u8 *)&dump->ioa_dump + off;
3225 memcpy(buf, src, len);
3226 buf += len;
3227 off += len;
3228 count -= len;
3229 }
3230
3231 off -= offsetof(struct ipr_ioa_dump, ioa_data);
3232
3233 while (count) {
3234 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
3235 len = PAGE_ALIGN(off) - off;
3236 else
3237 len = count;
3238 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
3239 src += off & ~PAGE_MASK;
3240 memcpy(buf, src, len);
3241 buf += len;
3242 off += len;
3243 count -= len;
3244 }
3245
3246 kref_put(&dump->kref, ipr_release_dump);
3247 return rc;
3248}
3249
3250/**
3251 * ipr_alloc_dump - Prepare for adapter dump
3252 * @ioa_cfg: ioa config struct
3253 *
3254 * Return value:
3255 * 0 on success / other on failure
3256 **/
3257static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3258{
3259 struct ipr_dump *dump;
3260 unsigned long lock_flags = 0;
3261
0bc42e35 3262 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
3263
3264 if (!dump) {
3265 ipr_err("Dump memory allocation failed\n");
3266 return -ENOMEM;
3267 }
3268
3269 kref_init(&dump->kref);
3270 dump->ioa_cfg = ioa_cfg;
3271
3272 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3273
3274 if (INACTIVE != ioa_cfg->sdt_state) {
3275 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3276 kfree(dump);
3277 return 0;
3278 }
3279
3280 ioa_cfg->dump = dump;
3281 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3282 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3283 ioa_cfg->dump_taken = 1;
3284 schedule_work(&ioa_cfg->work_q);
3285 }
3286 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3287
3288 return 0;
3289}
3290
3291/**
3292 * ipr_free_dump - Free adapter dump memory
3293 * @ioa_cfg: ioa config struct
3294 *
3295 * Return value:
3296 * 0 on success / other on failure
3297 **/
3298static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
3299{
3300 struct ipr_dump *dump;
3301 unsigned long lock_flags = 0;
3302
3303 ENTER;
3304
3305 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3306 dump = ioa_cfg->dump;
3307 if (!dump) {
3308 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3309 return 0;
3310 }
3311
3312 ioa_cfg->dump = NULL;
3313 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3314
3315 kref_put(&dump->kref, ipr_release_dump);
3316
3317 LEAVE;
3318 return 0;
3319}
3320
3321/**
3322 * ipr_write_dump - Setup dump state of adapter
3323 * @kobj: kobject struct
91a69029 3324 * @bin_attr: bin_attribute struct
3325 * @buf: buffer
3326 * @off: offset
3327 * @count: buffer size
3328 *
3329 * Return value:
3330 * count on success / other on failure
3331 **/
3332static ssize_t ipr_write_dump(struct kobject *kobj,
3333 struct bin_attribute *bin_attr,
3334 char *buf, loff_t off, size_t count)
1da177e4 3335{
ee959b00 3336 struct device *cdev = container_of(kobj, struct device, kobj);
3337 struct Scsi_Host *shost = class_to_shost(cdev);
3338 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3339 int rc;
3340
3341 if (!capable(CAP_SYS_ADMIN))
3342 return -EACCES;
3343
3344 if (buf[0] == '1')
3345 rc = ipr_alloc_dump(ioa_cfg);
3346 else if (buf[0] == '0')
3347 rc = ipr_free_dump(ioa_cfg);
3348 else
3349 return -EINVAL;
3350
3351 if (rc)
3352 return rc;
3353 else
3354 return count;
3355}
3356
3357static struct bin_attribute ipr_dump_attr = {
3358 .attr = {
3359 .name = "dump",
3360 .mode = S_IRUSR | S_IWUSR,
3361 },
3362 .size = 0,
3363 .read = ipr_read_dump,
3364 .write = ipr_write_dump
3365};
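
/*
 * Illustrative usage (host0 is an example): writing '1' prepares dump
 * memory, the dump is then read back as binary data, and writing '0'
 * frees it:
 *   # echo 1 > /sys/class/scsi_host/host0/dump
 *   # cat /sys/class/scsi_host/host0/dump > /tmp/ioa_dump
 *   # echo 0 > /sys/class/scsi_host/host0/dump
 */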
3366#else
3367static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
3368#endif
3369
3370/**
3371 * ipr_change_queue_depth - Change the device's queue depth
3372 * @sdev: scsi device struct
3373 * @qdepth: depth to set
3374 *
3375 * Return value:
3376 * actual depth set
3377 **/
3378static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
3379{
3380 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3381 struct ipr_resource_entry *res;
3382 unsigned long lock_flags = 0;
3383
3384 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3385 res = (struct ipr_resource_entry *)sdev->hostdata;
3386
3387 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
3388 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
3389 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3390
3391 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
3392 return sdev->queue_depth;
3393}
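
/*
 * Illustrative usage (sda is an example): the SCSI midlayer calls the
 * hook above when the per-device queue_depth attribute is written:
 *   # echo 16 > /sys/block/sda/device/queue_depth
 * GATA (SATA) devices are capped at IPR_MAX_CMD_PER_ATA_LUN regardless
 * of the value requested.
 */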
3394
3395/**
3396 * ipr_change_queue_type - Change the device's queue type
3397 * @sdev: scsi device struct
3398 * @tag_type: type of tags to use
3399 *
3400 * Return value:
3401 * actual queue type set
3402 **/
3403static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
3404{
3405 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3406 struct ipr_resource_entry *res;
3407 unsigned long lock_flags = 0;
3408
3409 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3410 res = (struct ipr_resource_entry *)sdev->hostdata;
3411
3412 if (res) {
3413 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
3414 /*
3415 * We don't bother quiescing the device here since the
3416 * adapter firmware does it for us.
3417 */
3418 scsi_set_tag_type(sdev, tag_type);
3419
3420 if (tag_type)
3421 scsi_activate_tcq(sdev, sdev->queue_depth);
3422 else
3423 scsi_deactivate_tcq(sdev, sdev->queue_depth);
3424 } else
3425 tag_type = 0;
3426 } else
3427 tag_type = 0;
3428
3429 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3430 return tag_type;
3431}
3432
3433/**
3434 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
3435 * @dev: device struct
3436 * @buf: buffer
3437 *
3438 * Return value:
3439 * number of bytes printed to buffer
3440 **/
3441static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
3442{
3443 struct scsi_device *sdev = to_scsi_device(dev);
3444 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3445 struct ipr_resource_entry *res;
3446 unsigned long lock_flags = 0;
3447 ssize_t len = -ENXIO;
3448
3449 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3450 res = (struct ipr_resource_entry *)sdev->hostdata;
3451 if (res)
3452 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
3453 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3454 return len;
3455}
3456
3457static struct device_attribute ipr_adapter_handle_attr = {
3458 .attr = {
3459 .name = "adapter_handle",
3460 .mode = S_IRUSR,
3461 },
3462 .show = ipr_show_adapter_handle
3463};
3464
3465static struct device_attribute *ipr_dev_attrs[] = {
3466 &ipr_adapter_handle_attr,
3467 NULL,
3468};
3469
3470/**
3471 * ipr_biosparam - Return the HSC mapping
3472 * @sdev: scsi device struct
3473 * @block_device: block device pointer
3474 * @capacity: capacity of the device
3475 * @parm: Array containing returned HSC values.
3476 *
3477 * This function generates the HSC parms that fdisk uses.
3478 * We want to make sure we return something that places partitions
3479 * on 4k boundaries for best performance with the IOA.
3480 *
3481 * Return value:
3482 * 0 on success
3483 **/
3484static int ipr_biosparam(struct scsi_device *sdev,
3485 struct block_device *block_device,
3486 sector_t capacity, int *parm)
3487{
3488 int heads, sectors;
3489 sector_t cylinders;
3490
3491 heads = 128;
3492 sectors = 32;
3493
3494 cylinders = capacity;
3495 sector_div(cylinders, (128 * 32));
3496
3497 /* return result */
3498 parm[0] = heads;
3499 parm[1] = sectors;
3500 parm[2] = cylinders;
3501
3502 return 0;
3503}
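/*
 * Worked example (illustrative only): with the fixed 128-head, 32-sector
 * geometry above, every cylinder spans 128 * 32 = 4096 sectors (2MB of
 * 512-byte blocks), so partitions that start on a cylinder boundary are
 * always 4k aligned. For a hypothetical disk of 71,132,960 sectors:
 *
 *	parm[0] = 128;				// heads
 *	parm[1] = 32;				// sectors per track
 *	parm[2] = 71132960 / (128 * 32);	// = 17366 cylinders
 */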
3504
3505/**
3506 * ipr_find_starget - Find target based on bus/target.
3507 * @starget: scsi target struct
3508 *
3509 * Return value:
3510 * resource entry pointer if found / NULL if not found
3511 **/
3512static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
3513{
3514 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
3515 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
3516 struct ipr_resource_entry *res;
3517
3518 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3519 if ((res->cfgte.res_addr.bus == starget->channel) &&
3520 (res->cfgte.res_addr.target == starget->id) &&
3521 (res->cfgte.res_addr.lun == 0)) {
3522 return res;
3523 }
3524 }
3525
3526 return NULL;
3527}
3528
3529static struct ata_port_info sata_port_info;
3530
3531/**
3532 * ipr_target_alloc - Prepare for commands to a SCSI target
3533 * @starget: scsi target struct
3534 *
3535 * If the device is a SATA device, this function allocates an
3536 * ATA port with libata, else it does nothing.
3537 *
3538 * Return value:
3539 * 0 on success / non-0 on failure
3540 **/
3541static int ipr_target_alloc(struct scsi_target *starget)
3542{
3543 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
3544 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
3545 struct ipr_sata_port *sata_port;
3546 struct ata_port *ap;
3547 struct ipr_resource_entry *res;
3548 unsigned long lock_flags;
3549
3550 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3551 res = ipr_find_starget(starget);
3552 starget->hostdata = NULL;
3553
3554 if (res && ipr_is_gata(res)) {
3555 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3556 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
3557 if (!sata_port)
3558 return -ENOMEM;
3559
3560 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
3561 if (ap) {
3562 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3563 sata_port->ioa_cfg = ioa_cfg;
3564 sata_port->ap = ap;
3565 sata_port->res = res;
3566
3567 res->sata_port = sata_port;
3568 ap->private_data = sata_port;
3569 starget->hostdata = sata_port;
3570 } else {
3571 kfree(sata_port);
3572 return -ENOMEM;
3573 }
3574 }
3575 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3576
3577 return 0;
3578}
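/*
 * Note on the locking pattern above (an illustrative sketch with generic
 * names, not driver API): host_lock is dropped before the
 * kzalloc(..., GFP_KERNEL) because a sleeping allocation must not be made
 * under a spinlock, then retaken before the new sata_port is published:
 *
 *	spin_lock_irqsave(lock, flags);
 *	// ...look up state...
 *	spin_unlock_irqrestore(lock, flags);	// drop before sleeping
 *	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *	spin_lock_irqsave(lock, flags);		// retake to publish obj
 *	// ...link obj into the looked-up state...
 *	spin_unlock_irqrestore(lock, flags);
 */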
3579
3580/**
3581 * ipr_target_destroy - Destroy a SCSI target
3582 * @starget: scsi target struct
3583 *
3584 * If the device was a SATA device, this function frees the libata
3585 * ATA port, else it does nothing.
3586 *
3587 **/
3588static void ipr_target_destroy(struct scsi_target *starget)
3589{
3590 struct ipr_sata_port *sata_port = starget->hostdata;
3591
3592 if (sata_port) {
3593 starget->hostdata = NULL;
3594 ata_sas_port_destroy(sata_port->ap);
3595 kfree(sata_port);
3596 }
3597}
3598
3599/**
3600 * ipr_find_sdev - Find device based on bus/target/lun.
3601 * @sdev: scsi device struct
3602 *
3603 * Return value:
3604 * resource entry pointer if found / NULL if not found
3605 **/
3606static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
3607{
3608 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3609 struct ipr_resource_entry *res;
3610
3611 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3612 if ((res->cfgte.res_addr.bus == sdev->channel) &&
3613 (res->cfgte.res_addr.target == sdev->id) &&
3614 (res->cfgte.res_addr.lun == sdev->lun))
3615 return res;
3616 }
3617
3618 return NULL;
3619}
3620
3621/**
3622 * ipr_slave_destroy - Unconfigure a SCSI device
3623 * @sdev: scsi device struct
3624 *
3625 * Return value:
3626 * nothing
3627 **/
3628static void ipr_slave_destroy(struct scsi_device *sdev)
3629{
3630 struct ipr_resource_entry *res;
3631 struct ipr_ioa_cfg *ioa_cfg;
3632 unsigned long lock_flags = 0;
3633
3634 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3635
3636 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3637 res = (struct ipr_resource_entry *) sdev->hostdata;
3638 if (res) {
3639 if (res->sata_port)
3640 ata_port_disable(res->sata_port->ap);
3641 sdev->hostdata = NULL;
3642 res->sdev = NULL;
3643 res->sata_port = NULL;
3644 }
3645 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3646}
3647
3648/**
3649 * ipr_slave_configure - Configure a SCSI device
3650 * @sdev: scsi device struct
3651 *
3652 * This function configures the specified scsi device.
3653 *
3654 * Return value:
3655 * 0 on success
3656 **/
3657static int ipr_slave_configure(struct scsi_device *sdev)
3658{
3659 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3660 struct ipr_resource_entry *res;
3661 unsigned long lock_flags = 0;
3662
3663 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3664 res = sdev->hostdata;
3665 if (res) {
3666 if (ipr_is_af_dasd_device(res))
3667 sdev->type = TYPE_RAID;
3668 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
3669 sdev->scsi_level = 4;
3670 sdev->no_uld_attach = 1;
3671 }
3672 if (ipr_is_vset_device(res)) {
3673 sdev->timeout = IPR_VSET_RW_TIMEOUT;
3674 blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
3675 }
3676 if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
3677 sdev->allow_restart = 1;
3678 if (ipr_is_gata(res) && res->sata_port) {
3679 scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
3680 ata_sas_slave_configure(sdev, res->sata_port->ap);
3681 } else {
3682 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
3683 }
3684 }
3685 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3686 return 0;
3687}
3688
3689/**
3690 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
3691 * @sdev: scsi device struct
3692 *
3693 * This function initializes an ATA port so that future commands
3694 * sent through queuecommand will work.
3695 *
3696 * Return value:
3697 * 0 on success
3698 **/
3699static int ipr_ata_slave_alloc(struct scsi_device *sdev)
3700{
3701 struct ipr_sata_port *sata_port = NULL;
3702 int rc = -ENXIO;
3703
3704 ENTER;
3705 if (sdev->sdev_target)
3706 sata_port = sdev->sdev_target->hostdata;
3707 if (sata_port)
3708 rc = ata_sas_port_init(sata_port->ap);
3709 if (rc)
3710 ipr_slave_destroy(sdev);
3711
3712 LEAVE;
3713 return rc;
3714}
3715
3716/**
3717 * ipr_slave_alloc - Prepare for commands to a device.
3718 * @sdev: scsi device struct
3719 *
3720 * This function saves a pointer to the resource entry
3721 * in the scsi device struct if the device exists. We
3722 * can then use this pointer in ipr_queuecommand when
3723 * handling new commands.
3724 *
3725 * Return value:
3726 * 0 on success / -ENXIO if device does not exist
3727 **/
3728static int ipr_slave_alloc(struct scsi_device *sdev)
3729{
3730 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3731 struct ipr_resource_entry *res;
3732 unsigned long lock_flags;
3733 int rc = -ENXIO;
3734
3735 sdev->hostdata = NULL;
3736
3737 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3738
3739 res = ipr_find_sdev(sdev);
3740 if (res) {
3741 res->sdev = sdev;
3742 res->add_to_ml = 0;
3743 res->in_erp = 0;
3744 sdev->hostdata = res;
3745 if (!ipr_is_naca_model(res))
3746 res->needs_sync_complete = 1;
3747 rc = 0;
3748 if (ipr_is_gata(res)) {
3749 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3750 return ipr_ata_slave_alloc(sdev);
3751 }
3752 }
3753
3754 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3755
3756 return rc;
3757}
3758
3759/**
3760 * ipr_eh_host_reset - Reset the host adapter
3761 * @scsi_cmd: scsi command struct
3762 *
3763 * Return value:
3764 * SUCCESS / FAILED
3765 **/
3766static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
3767{
3768 struct ipr_ioa_cfg *ioa_cfg;
3769 int rc;
3770
3771 ENTER;
3772 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3773
3774 dev_err(&ioa_cfg->pdev->dev,
3775 "Adapter being reset as a result of error recovery.\n");
3776
3777 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3778 ioa_cfg->sdt_state = GET_DUMP;
3779
3780 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
3781
3782 LEAVE;
3783 return rc;
3784}
3785
3786static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
3787{
3788 int rc;
3789
3790 spin_lock_irq(cmd->device->host->host_lock);
3791 rc = __ipr_eh_host_reset(cmd);
3792 spin_unlock_irq(cmd->device->host->host_lock);
3793
3794 return rc;
3795}
3796
3797/**
3798 * ipr_device_reset - Reset the device
3799 * @ioa_cfg: ioa config struct
3800 * @res: resource entry struct
3801 *
3802 * This function issues a device reset to the affected device.
3803 * If the device is a SCSI device, a LUN reset will be sent
3804 * to the device first. If that does not work, a target reset
3805 * will be sent. If the device is a SATA device, a PHY reset will
3806 * be sent.
3807 *
3808 * Return value:
3809 * 0 on success / non-zero on failure
3810 **/
3811static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
3812 struct ipr_resource_entry *res)
3813{
3814 struct ipr_cmnd *ipr_cmd;
3815 struct ipr_ioarcb *ioarcb;
3816 struct ipr_cmd_pkt *cmd_pkt;
3817 struct ipr_ioarcb_ata_regs *regs;
3818 u32 ioasc;
3819
3820 ENTER;
3821 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3822 ioarcb = &ipr_cmd->ioarcb;
3823 cmd_pkt = &ioarcb->cmd_pkt;
3824 regs = &ioarcb->add_data.u.regs;
3825
3826 ioarcb->res_handle = res->cfgte.res_handle;
3827 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3828 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3829 if (ipr_is_gata(res)) {
3830 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
3831 ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(regs->flags));
3832 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
3833 }
3834
3835 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3836 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3837 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3838 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET)
3839 memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
3840 sizeof(struct ipr_ioasa_gata));
3841
3842 LEAVE;
3843 return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
3844}
3845
3846/**
3847 * ipr_sata_reset - Reset the SATA port
3848 * @link: SATA link to reset
3849 * @classes: class of the attached device
3850 *
3851 * This function issues a SATA phy reset to the affected ATA link.
3852 *
3853 * Return value:
3854 * 0 on success / non-zero on failure
3855 **/
3856static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
3857 unsigned long deadline)
3858{
3859 struct ipr_sata_port *sata_port = link->ap->private_data;
3860 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
3861 struct ipr_resource_entry *res;
3862 unsigned long lock_flags = 0;
3863 int rc = -ENXIO;
3864
3865 ENTER;
3866 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3867 while(ioa_cfg->in_reset_reload) {
3868 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3869 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3870 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3871 }
3872
3873 res = sata_port->res;
3874 if (res) {
3875 rc = ipr_device_reset(ioa_cfg, res);
3876 switch(res->cfgte.proto) {
3877 case IPR_PROTO_SATA:
3878 case IPR_PROTO_SAS_STP:
3879 *classes = ATA_DEV_ATA;
3880 break;
3881 case IPR_PROTO_SATA_ATAPI:
3882 case IPR_PROTO_SAS_STP_ATAPI:
3883 *classes = ATA_DEV_ATAPI;
3884 break;
3885 default:
3886 *classes = ATA_DEV_UNKNOWN;
3887 break;
3888 };
3889 }
3890
3891 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3892 LEAVE;
3893 return rc;
3894}
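/*
 * Illustrative note on the wait loop above (the same shape recurs in
 * ipr_ata_phy_reset() and ipr_ata_post_internal() below): host_lock must
 * be dropped around wait_event() because the reset job itself needs the
 * lock to make progress, and in_reset_reload is re-checked with the lock
 * held since a new reset may have started while we slept:
 *
 *	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
 *	while (ioa_cfg->in_reset_reload) {
 *		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 *		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 *		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
 *	}
 */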
3895
3896/**
3897 * ipr_eh_dev_reset - Reset the device
3898 * @scsi_cmd: scsi command struct
3899 *
3900 * This function issues a device reset to the affected device.
3901 * A LUN reset will be sent to the device first. If that does
3902 * not work, a target reset will be sent.
3903 *
3904 * Return value:
3905 * SUCCESS / FAILED
3906 **/
3907static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3908{
3909 struct ipr_cmnd *ipr_cmd;
3910 struct ipr_ioa_cfg *ioa_cfg;
3911 struct ipr_resource_entry *res;
3912 struct ata_port *ap;
3913 int rc = 0;
3914
3915 ENTER;
3916 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3917 res = scsi_cmd->device->hostdata;
3918
3919 if (!res)
3920 return FAILED;
3921
3922 /*
3923 * If we are currently going through reset/reload, return failed. This will force the
3924 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
3925 * reset to complete
3926 */
3927 if (ioa_cfg->in_reset_reload)
3928 return FAILED;
3929 if (ioa_cfg->ioa_is_dead)
3930 return FAILED;
3931
3932 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3933 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3934 if (ipr_cmd->scsi_cmd)
3935 ipr_cmd->done = ipr_scsi_eh_done;
3936 if (ipr_cmd->qc)
3937 ipr_cmd->done = ipr_sata_eh_done;
3938 if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
3939 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
3940 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
3941 }
3942 }
3943 }
3944
3945 res->resetting_device = 1;
3946 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
3947
3948 if (ipr_is_gata(res) && res->sata_port) {
3949 ap = res->sata_port->ap;
3950 spin_unlock_irq(scsi_cmd->device->host->host_lock);
3951 ata_std_error_handler(ap);
3952 spin_lock_irq(scsi_cmd->device->host->host_lock);
3953
3954 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3955 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3956 rc = -EIO;
3957 break;
3958 }
3959 }
3960 } else
3961 rc = ipr_device_reset(ioa_cfg, res);
3962 res->resetting_device = 0;
3963
3964 LEAVE;
3965 return (rc ? FAILED : SUCCESS);
3966}
3967
3968static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
3969{
3970 int rc;
3971
3972 spin_lock_irq(cmd->device->host->host_lock);
3973 rc = __ipr_eh_dev_reset(cmd);
3974 spin_unlock_irq(cmd->device->host->host_lock);
3975
3976 return rc;
3977}
3978
3979/**
3980 * ipr_bus_reset_done - Op done function for bus reset.
3981 * @ipr_cmd: ipr command struct
3982 *
3983 * This function is the op done function for a bus reset
3984 *
3985 * Return value:
3986 * none
3987 **/
3988static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3989{
3990 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3991 struct ipr_resource_entry *res;
3992
3993 ENTER;
3994 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3995 if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3996 sizeof(res->cfgte.res_handle))) {
3997 scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3998 break;
3999 }
4000 }
4001
4002 /*
4003 * If abort has not completed, indicate the reset has, else call the
4004 * abort's done function to wake the sleeping eh thread
4005 */
4006 if (ipr_cmd->sibling->sibling)
4007 ipr_cmd->sibling->sibling = NULL;
4008 else
4009 ipr_cmd->sibling->done(ipr_cmd->sibling);
4010
4011 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4012 LEAVE;
4013}
4014
4015/**
4016 * ipr_abort_timeout - An abort task has timed out
4017 * @ipr_cmd: ipr command struct
4018 *
4019 * This function handles when an abort task times out. If this
4020 * happens we issue a bus reset since we have resources tied
4021 * up that must be freed before returning to the midlayer.
4022 *
4023 * Return value:
4024 * none
4025 **/
4026static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
4027{
4028 struct ipr_cmnd *reset_cmd;
4029 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4030 struct ipr_cmd_pkt *cmd_pkt;
4031 unsigned long lock_flags = 0;
4032
4033 ENTER;
4034 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4035 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
4036 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4037 return;
4038 }
4039
4040 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
4041 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4042 ipr_cmd->sibling = reset_cmd;
4043 reset_cmd->sibling = ipr_cmd;
4044 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
4045 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
4046 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4047 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4048 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
4049
4050 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4051 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4052 LEAVE;
4053}
4054
4055/**
4056 * ipr_cancel_op - Cancel specified op
4057 * @scsi_cmd: scsi command struct
4058 *
4059 * This function cancels the specified op.
4060 *
4061 * Return value:
4062 * SUCCESS / FAILED
4063 **/
4064static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
4065{
4066 struct ipr_cmnd *ipr_cmd;
4067 struct ipr_ioa_cfg *ioa_cfg;
4068 struct ipr_resource_entry *res;
4069 struct ipr_cmd_pkt *cmd_pkt;
4070 u32 ioasc;
4071 int op_found = 0;
4072
4073 ENTER;
4074 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4075 res = scsi_cmd->device->hostdata;
4076
4077 /* If we are currently going through reset/reload, return failed.
4078 * This will force the mid-layer to call ipr_eh_host_reset,
4079 * which will then go to sleep and wait for the reset to complete
4080 */
4081 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
4082 return FAILED;
4083 if (!res || !ipr_is_gscsi(res))
4084 return FAILED;
4085
4086 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4087 if (ipr_cmd->scsi_cmd == scsi_cmd) {
4088 ipr_cmd->done = ipr_scsi_eh_done;
4089 op_found = 1;
4090 break;
4091 }
4092 }
4093
4094 if (!op_found)
4095 return SUCCESS;
4096
4097 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4098 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
4099 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4100 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4101 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4102 ipr_cmd->u.sdev = scsi_cmd->device;
4103
4104 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
4105 scsi_cmd->cmnd[0]);
4106 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
4107 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4108
4109 /*
4110 * If the abort task timed out and we sent a bus reset, we will get
4111 * one of the following responses to the abort
4112 */
4113 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
4114 ioasc = 0;
4115 ipr_trace;
4116 }
4117
4118 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4119 if (!ipr_is_naca_model(res))
4120 res->needs_sync_complete = 1;
4121
4122 LEAVE;
4123 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
4124}
4125
4126/**
4127 * ipr_eh_abort - Abort a single op
4128 * @scsi_cmd: scsi command struct
4129 *
4130 * Return value:
4131 * SUCCESS / FAILED
4132 **/
4133static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4134{
4135 unsigned long flags;
4136 int rc;
4137
4138 ENTER;
4139
4140 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4141 rc = ipr_cancel_op(scsi_cmd);
4142 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
4143
4144 LEAVE;
4145 return rc;
4146}
4147
4148/**
4149 * ipr_handle_other_interrupt - Handle "other" interrupts
4150 * @ioa_cfg: ioa config struct
4151 * @int_reg: interrupt register
4152 *
4153 * Return value:
4154 * IRQ_NONE / IRQ_HANDLED
4155 **/
4156static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
4157 volatile u32 int_reg)
4158{
4159 irqreturn_t rc = IRQ_HANDLED;
4160
4161 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4162 /* Mask the interrupt */
4163 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
4164
4165 /* Clear the interrupt */
4166 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
4167 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4168
4169 list_del(&ioa_cfg->reset_cmd->queue);
4170 del_timer(&ioa_cfg->reset_cmd->timer);
4171 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4172 } else {
4173 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
4174 ioa_cfg->ioa_unit_checked = 1;
4175 else
4176 dev_err(&ioa_cfg->pdev->dev,
4177 "Permanent IOA failure. 0x%08X\n", int_reg);
4178
4179 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4180 ioa_cfg->sdt_state = GET_DUMP;
4181
4182 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
4183 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4184 }
4185
4186 return rc;
4187}
4188
4189/**
4190 * ipr_isr - Interrupt service routine
4191 * @irq: irq number
4192 * @devp: pointer to ioa config struct
4193 *
4194 * Return value:
4195 * IRQ_NONE / IRQ_HANDLED
4196 **/
4197static irqreturn_t ipr_isr(int irq, void *devp)
4198{
4199 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
4200 unsigned long lock_flags = 0;
4201 volatile u32 int_reg, int_mask_reg;
4202 u32 ioasc;
4203 u16 cmd_index;
4204 struct ipr_cmnd *ipr_cmd;
4205 irqreturn_t rc = IRQ_NONE;
4206
4207 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4208
4209 /* If interrupts are disabled, ignore the interrupt */
4210 if (!ioa_cfg->allow_interrupts) {
4211 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4212 return IRQ_NONE;
4213 }
4214
4215 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4216 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4217
4218 /* If an interrupt on the adapter did not occur, ignore it */
4219 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
4220 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4221 return IRQ_NONE;
4222 }
4223
4224 while (1) {
4225 ipr_cmd = NULL;
4226
4227 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
4228 ioa_cfg->toggle_bit) {
4229
4230 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
4231 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
4232
4233 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
4234 ioa_cfg->errors_logged++;
4235 dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
4236
4237 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4238 ioa_cfg->sdt_state = GET_DUMP;
4239
4240 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4241 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4242 return IRQ_HANDLED;
4243 }
4244
4245 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
4246
4247 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4248
4249 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
4250
4251 list_del(&ipr_cmd->queue);
4252 del_timer(&ipr_cmd->timer);
4253 ipr_cmd->done(ipr_cmd);
4254
4255 rc = IRQ_HANDLED;
4256
4257 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
4258 ioa_cfg->hrrq_curr++;
4259 } else {
4260 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
4261 ioa_cfg->toggle_bit ^= 1u;
4262 }
4263 }
4264
4265 if (ipr_cmd != NULL) {
4266 /* Clear the PCI interrupt */
4267 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
4268 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4269 } else
4270 break;
4271 }
4272
4273 if (unlikely(rc == IRQ_NONE))
4274 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
4275
4276 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4277 return rc;
4278}
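/*
 * Illustrative sketch of the HRRQ consumer pattern used in ipr_isr()
 * above, reduced to its essentials with generic names. The toggle bit
 * flips each time the consumer wraps, so an entry is valid only while
 * its toggle bit matches the consumer's expected value; no separate
 * producer index register is needed:
 *
 *	while ((be32_to_cpu(*hrrq_curr) & TOGGLE_BIT) == toggle) {
 *		consume(*hrrq_curr);		// one response handle
 *		if (hrrq_curr < hrrq_end)
 *			hrrq_curr++;		// advance within the ring
 *		else {
 *			hrrq_curr = hrrq_start;	// wrap around...
 *			toggle ^= 1;		// ...and flip expectation
 *		}
 *	}
 */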
4279
4280/**
4281 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
4282 * @ioa_cfg: ioa config struct
4283 * @ipr_cmd: ipr command struct
4284 *
4285 * Return value:
4286 * 0 on success / -1 on failure
4287 **/
4288static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
4289 struct ipr_cmnd *ipr_cmd)
4290{
4291 int i, nseg;
4292 struct scatterlist *sg;
4293 u32 length;
4294 u32 ioadl_flags = 0;
4295 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4296 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4297 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4298
4299 length = scsi_bufflen(scsi_cmd);
4300 if (!length)
4301 return 0;
4302
4303 nseg = scsi_dma_map(scsi_cmd);
4304 if (nseg < 0) {
4305 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
4306 return -1;
4307 }
4308
4309 ipr_cmd->dma_use_sg = nseg;
4310
4311 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4312 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4313 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4314 ioarcb->write_data_transfer_length = cpu_to_be32(length);
4315 ioarcb->write_ioadl_len =
4316 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4317 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
4318 ioadl_flags = IPR_IOADL_FLAGS_READ;
4319 ioarcb->read_data_transfer_length = cpu_to_be32(length);
4320 ioarcb->read_ioadl_len =
4321 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4322 }
4323
4324 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) {
4325 ioadl = ioarcb->add_data.u.ioadl;
4326 ioarcb->write_ioadl_addr =
4327 cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
4328 offsetof(struct ipr_ioarcb, add_data));
4329 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4330 }
4331
4332 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
4333 ioadl[i].flags_and_data_len =
4334 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
4335 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
4336 }
4337
4338 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4339 return 0;
4340}
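/*
 * Illustrative example (not driver code): for a two-element read
 * scatter/gather list of 4k and 2k, the loop above would produce an
 * IOADL equivalent to the following, assuming IPR_IOADL_FLAGS_READ_LAST
 * is IPR_IOADL_FLAGS_READ with the LAST flag ORed in, as used by
 * ipr_erp_request_sense() below; dma_addr_of_seg0/1 are placeholders:
 *
 *	ioadl[0].flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_READ | 0x1000);
 *	ioadl[0].address = cpu_to_be32(dma_addr_of_seg0);
 *	ioadl[1].flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | 0x800);
 *	ioadl[1].address = cpu_to_be32(dma_addr_of_seg1);
 */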
4341
4342/**
4343 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
4344 * @scsi_cmd: scsi command struct
4345 *
4346 * Return value:
4347 * task attributes
4348 **/
4349static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
4350{
4351 u8 tag[2];
4352 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
4353
4354 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
4355 switch (tag[0]) {
4356 case MSG_SIMPLE_TAG:
4357 rc = IPR_FLAGS_LO_SIMPLE_TASK;
4358 break;
4359 case MSG_HEAD_TAG:
4360 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
4361 break;
4362 case MSG_ORDERED_TAG:
4363 rc = IPR_FLAGS_LO_ORDERED_TASK;
4364 break;
4365 };
4366 }
4367
4368 return rc;
4369}
4370
4371/**
4372 * ipr_erp_done - Process completion of ERP for a device
4373 * @ipr_cmd: ipr command struct
4374 *
4375 * This function copies the sense buffer into the scsi_cmd
4376 * struct and pushes the scsi_done function.
4377 *
4378 * Return value:
4379 * nothing
4380 **/
4381static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
4382{
4383 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4384 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4385 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4386 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4387
4388 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
4389 scsi_cmd->result |= (DID_ERROR << 16);
4390 scmd_printk(KERN_ERR, scsi_cmd,
4391 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
4392 } else {
4393 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
4394 SCSI_SENSE_BUFFERSIZE);
4395 }
4396
4397 if (res) {
4398 if (!ipr_is_naca_model(res))
4399 res->needs_sync_complete = 1;
4400 res->in_erp = 0;
4401 }
4402 scsi_dma_unmap(ipr_cmd->scsi_cmd);
4403 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4404 scsi_cmd->scsi_done(scsi_cmd);
4405}
4406
4407/**
4408 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
4409 * @ipr_cmd: ipr command struct
4410 *
4411 * Return value:
4412 * none
4413 **/
4414static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
4415{
4416 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4417 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4418 dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);
4419
4420 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
4421 ioarcb->write_data_transfer_length = 0;
4422 ioarcb->read_data_transfer_length = 0;
4423 ioarcb->write_ioadl_len = 0;
4424 ioarcb->read_ioadl_len = 0;
4425 ioasa->ioasc = 0;
4426 ioasa->residual_data_len = 0;
4427 ioarcb->write_ioadl_addr =
4428 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
4429 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4430}
4431
4432/**
4433 * ipr_erp_request_sense - Send request sense to a device
4434 * @ipr_cmd: ipr command struct
4435 *
4436 * This function sends a request sense to a device as a result
4437 * of a check condition.
4438 *
4439 * Return value:
4440 * nothing
4441 **/
4442static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
4443{
4444 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4445 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4446
4447 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
4448 ipr_erp_done(ipr_cmd);
4449 return;
4450 }
4451
4452 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
4453
4454 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
4455 cmd_pkt->cdb[0] = REQUEST_SENSE;
4456 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
4457 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
4458 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4459 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
4460
4461 ipr_cmd->ioadl[0].flags_and_data_len =
4462 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
4463 ipr_cmd->ioadl[0].address =
4464 cpu_to_be32(ipr_cmd->sense_buffer_dma);
4465
4466 ipr_cmd->ioarcb.read_ioadl_len =
4467 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4468 ipr_cmd->ioarcb.read_data_transfer_length =
4469 cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
4470
4471 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
4472 IPR_REQUEST_SENSE_TIMEOUT * 2);
4473}
4474
4475/**
4476 * ipr_erp_cancel_all - Send cancel all to a device
4477 * @ipr_cmd: ipr command struct
4478 *
4479 * This function sends a cancel all to a device to clear the
4480 * queue. If we are running TCQ on the device, QERR is set to 1,
4481 * which means all outstanding ops have been dropped on the floor.
4482 * Cancel all will return them to us.
4483 *
4484 * Return value:
4485 * nothing
4486 **/
4487static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
4488{
4489 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4490 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4491 struct ipr_cmd_pkt *cmd_pkt;
4492
4493 res->in_erp = 1;
4494
4495 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
4496
4497 if (!scsi_get_tag_type(scsi_cmd->device)) {
4498 ipr_erp_request_sense(ipr_cmd);
4499 return;
4500 }
4501
4502 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4503 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4504 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4505
4506 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
4507 IPR_CANCEL_ALL_TIMEOUT);
4508}
4509
4510/**
4511 * ipr_dump_ioasa - Dump contents of IOASA
4512 * @ioa_cfg: ioa config struct
4513 * @ipr_cmd: ipr command struct
4514 * @res: resource entry struct
4515 *
4516 * This function is invoked by the interrupt handler when ops
4517 * fail. It will log the IOASA if appropriate. Only called
4518 * for GPDD ops.
4519 *
4520 * Return value:
4521 * none
4522 **/
4523static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
4524 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
4525{
4526 int i;
4527 u16 data_len;
4528 u32 ioasc, fd_ioasc;
4529 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4530 __be32 *ioasa_data = (__be32 *)ioasa;
4531 int error_index;
4532
4533 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
4534 fd_ioasc = be32_to_cpu(ioasa->fd_ioasc) & IPR_IOASC_IOASC_MASK;
4535
4536 if (0 == ioasc)
4537 return;
4538
4539 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
4540 return;
4541
4542 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
4543 error_index = ipr_get_error(fd_ioasc);
4544 else
4545 error_index = ipr_get_error(ioasc);
4546
4547 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
4548 /* Don't log an error if the IOA already logged one */
4549 if (ioasa->ilid != 0)
4550 return;
4551
4552 if (!ipr_is_gscsi(res))
4553 return;
4554
4555 if (ipr_error_table[error_index].log_ioasa == 0)
4556 return;
4557 }
4558
4559 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
4560
4561 if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
4562 data_len = sizeof(struct ipr_ioasa);
4563 else
4564 data_len = be16_to_cpu(ioasa->ret_stat_len);
4565
4566 ipr_err("IOASA Dump:\n");
4567
4568 for (i = 0; i < data_len / 4; i += 4) {
4569 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
4570 be32_to_cpu(ioasa_data[i]),
4571 be32_to_cpu(ioasa_data[i+1]),
4572 be32_to_cpu(ioasa_data[i+2]),
4573 be32_to_cpu(ioasa_data[i+3]));
4574 }
4575}
4576
4577/**
4578 * ipr_gen_sense - Generate SCSI sense data from an IOASA
4579 * @ioasa: IOASA
4580 * @sense_buf: sense data buffer
4581 *
4582 * Return value:
4583 * none
4584 **/
4585static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
4586{
4587 u32 failing_lba;
4588 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
4589 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
4590 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4591 u32 ioasc = be32_to_cpu(ioasa->ioasc);
4592
4593 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
4594
4595 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
4596 return;
4597
4598 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
4599
4600 if (ipr_is_vset_device(res) &&
4601 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
4602 ioasa->u.vset.failing_lba_hi != 0) {
4603 sense_buf[0] = 0x72;
4604 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
4605 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
4606 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
4607
4608 sense_buf[7] = 12;
4609 sense_buf[8] = 0;
4610 sense_buf[9] = 0x0A;
4611 sense_buf[10] = 0x80;
4612
4613 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
4614
4615 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
4616 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
4617 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
4618 sense_buf[15] = failing_lba & 0x000000ff;
4619
4620 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4621
4622 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
4623 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
4624 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
4625 sense_buf[19] = failing_lba & 0x000000ff;
4626 } else {
4627 sense_buf[0] = 0x70;
4628 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
4629 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
4630 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
4631
4632 /* Illegal request */
4633 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
4634 (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
4635 sense_buf[7] = 10; /* additional length */
4636
4637 /* IOARCB was in error */
4638 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
4639 sense_buf[15] = 0xC0;
4640 else /* Parameter data was invalid */
4641 sense_buf[15] = 0x80;
4642
4643 sense_buf[16] =
4644 ((IPR_FIELD_POINTER_MASK &
4645 be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
4646 sense_buf[17] =
4647 (IPR_FIELD_POINTER_MASK &
4648 be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
4649 } else {
4650 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
4651 if (ipr_is_vset_device(res))
4652 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4653 else
4654 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
4655
4656 sense_buf[0] |= 0x80; /* Or in the Valid bit */
4657 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
4658 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
4659 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
4660 sense_buf[6] = failing_lba & 0x000000ff;
4661 }
4662
4663 sense_buf[7] = 6; /* additional length */
4664 }
4665 }
4666}
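/*
 * Illustrative example: for a hypothetical DASD IPR_IOASC_MED_DO_NOT_REALLOC
 * whose IOASC carries sense key 3 (MEDIUM ERROR), the fixed-format branch
 * above would yield:
 *
 *	sense_buf[0] = 0xF0;	// response code 0x70 with Valid bit set
 *	sense_buf[2] = 0x03;	// sense key from IPR_IOASC_SENSE_KEY()
 *	sense_buf[3..6]		// big-endian failing LBA
 *	sense_buf[7] = 6;	// additional sense length
 *	sense_buf[12..13]	// ASC/ASCQ from the IOASC
 */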
4667
4668/**
4669 * ipr_get_autosense - Copy autosense data to sense buffer
4670 * @ipr_cmd: ipr command struct
4671 *
4672 * This function copies the autosense buffer to the buffer
4673 * in the scsi_cmd, if there is autosense available.
4674 *
4675 * Return value:
4676 * 1 if autosense was available / 0 if not
4677 **/
4678static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
4679{
4680 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4681
4682 if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
4683 return 0;
4684
4685 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
4686 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
4687 SCSI_SENSE_BUFFERSIZE));
4688 return 1;
4689}
4690
4691/**
4692 * ipr_erp_start - Process an error response for a SCSI op
4693 * @ioa_cfg: ioa config struct
4694 * @ipr_cmd: ipr command struct
4695 *
4696 * This function determines whether or not to initiate ERP
4697 * on the affected device.
4698 *
4699 * Return value:
4700 * nothing
4701 **/
4702static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4703 struct ipr_cmnd *ipr_cmd)
4704{
4705 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4706 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4707 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4708 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
4709
4710 if (!res) {
4711 ipr_scsi_eh_done(ipr_cmd);
4712 return;
4713 }
4714
4715 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
4716 ipr_gen_sense(ipr_cmd);
4717
4718 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
4719
4720 switch (masked_ioasc) {
4721 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
4722 if (ipr_is_naca_model(res))
4723 scsi_cmd->result |= (DID_ABORT << 16);
4724 else
4725 scsi_cmd->result |= (DID_IMM_RETRY << 16);
4726 break;
4727 case IPR_IOASC_IR_RESOURCE_HANDLE:
4728 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
4729 scsi_cmd->result |= (DID_NO_CONNECT << 16);
4730 break;
4731 case IPR_IOASC_HW_SEL_TIMEOUT:
4732 scsi_cmd->result |= (DID_NO_CONNECT << 16);
4733 if (!ipr_is_naca_model(res))
4734 res->needs_sync_complete = 1;
4735 break;
4736 case IPR_IOASC_SYNC_REQUIRED:
4737 if (!res->in_erp)
4738 res->needs_sync_complete = 1;
4739 scsi_cmd->result |= (DID_IMM_RETRY << 16);
4740 break;
4741 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
4742 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
4743 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
4744 break;
4745 case IPR_IOASC_BUS_WAS_RESET:
4746 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
4747 /*
4748 * Report the bus reset and ask for a retry. The device
4749 * will give CC/UA the next command.
4750 */
4751 if (!res->resetting_device)
4752 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
4753 scsi_cmd->result |= (DID_ERROR << 16);
4754 if (!ipr_is_naca_model(res))
4755 res->needs_sync_complete = 1;
4756 break;
4757 case IPR_IOASC_HW_DEV_BUS_STATUS:
4758 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
4759 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
4760 if (!ipr_get_autosense(ipr_cmd)) {
4761 if (!ipr_is_naca_model(res)) {
4762 ipr_erp_cancel_all(ipr_cmd);
4763 return;
4764 }
4765 }
4766 }
4767 if (!ipr_is_naca_model(res))
4768 res->needs_sync_complete = 1;
4769 break;
4770 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
4771 break;
4772 default:
4773 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
4774 scsi_cmd->result |= (DID_ERROR << 16);
4775 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
4776 res->needs_sync_complete = 1;
4777 break;
4778 }
4779
4780 scsi_dma_unmap(ipr_cmd->scsi_cmd);
4781 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4782 scsi_cmd->scsi_done(scsi_cmd);
4783}
4784
4785/**
4786 * ipr_scsi_done - mid-layer done function
4787 * @ipr_cmd: ipr command struct
4788 *
4789 * This function is invoked by the interrupt handler for
4790 * ops generated by the SCSI mid-layer
4791 *
4792 * Return value:
4793 * none
4794 **/
4795static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
4796{
4797 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4798 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4799 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4800
4801 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->ioasa.residual_data_len));
4802
4803 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
4804 scsi_dma_unmap(ipr_cmd->scsi_cmd);
4805 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4806 scsi_cmd->scsi_done(scsi_cmd);
4807 } else
4808 ipr_erp_start(ioa_cfg, ipr_cmd);
4809}
4810
4811/**
4812 * ipr_queuecommand - Queue a mid-layer request
4813 * @scsi_cmd: scsi command struct
4814 * @done: done function
4815 *
4816 * This function queues a request generated by the mid-layer.
4817 *
4818 * Return value:
4819 * 0 on success
4820 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
4821 * SCSI_MLQUEUE_HOST_BUSY if host is busy
4822 **/
4823static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4824 void (*done) (struct scsi_cmnd *))
4825{
4826 struct ipr_ioa_cfg *ioa_cfg;
4827 struct ipr_resource_entry *res;
4828 struct ipr_ioarcb *ioarcb;
4829 struct ipr_cmnd *ipr_cmd;
4830 int rc = 0;
4831
4832 scsi_cmd->scsi_done = done;
4833 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4834 res = scsi_cmd->device->hostdata;
4835 scsi_cmd->result = (DID_OK << 16);
4836
4837 /*
4838 * We are currently blocking all devices due to a host reset.
4839 * We have told the host to stop giving us new requests, but
4840 * ERP ops don't count. FIXME
4841 */
4842 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
4843 return SCSI_MLQUEUE_HOST_BUSY;
4844
4845 /*
4846 * FIXME - Create scsi_set_host_offline interface
4847 * and the ioa_is_dead check can be removed
4848 */
4849 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
4850 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
4851 scsi_cmd->result = (DID_NO_CONNECT << 16);
4852 scsi_cmd->scsi_done(scsi_cmd);
4853 return 0;
4854 }
4855
4856 if (ipr_is_gata(res) && res->sata_port)
4857 return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);
4858
1da177e4
LT
4859 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4860 ioarcb = &ipr_cmd->ioarcb;
4861 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4862
4863 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
4864 ipr_cmd->scsi_cmd = scsi_cmd;
4865 ioarcb->res_handle = res->cfgte.res_handle;
4866 ipr_cmd->done = ipr_scsi_done;
4867 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
4868
4869 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
4870 if (scsi_cmd->underflow == 0)
4871 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4872
4873 if (res->needs_sync_complete) {
4874 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
4875 res->needs_sync_complete = 0;
4876 }
4877
4878 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
4879 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
4880 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
4881 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
4882 }
4883
4884 if (scsi_cmd->cmnd[0] >= 0xC0 &&
4885 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
4886 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4887
1da177e4
LT
4888 if (likely(rc == 0))
4889 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
4890
4891 if (likely(rc == 0)) {
4892 mb();
4893 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
4894 ioa_cfg->regs.ioarrin_reg);
4895 } else {
4896 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4897 return SCSI_MLQUEUE_HOST_BUSY;
4898 }
4899
4900 return 0;
4901}
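/*
 * Note on the mb() + writel() pair above (a generic sketch of the doorbell
 * handoff, with placeholder names): the memory barrier orders the
 * IOARCB/IOADL stores ahead of the doorbell write, so the adapter never
 * DMAs a half-built command block:
 *
 *	build_ioarcb_and_ioadl(ipr_cmd);	// fill command block in host memory
 *	mb();					// order the stores before the doorbell
 *	writel(ioarcb_dma_addr, ioarrin_reg);	// IOA owns the block from here on
 */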
4902
4903/**
4904 * ipr_ioctl - IOCTL handler
4905 * @sdev: scsi device struct
4906 * @cmd: IOCTL cmd
4907 * @arg: IOCTL arg
4908 *
4909 * Return value:
4910 * 0 on success / other on failure
4911 **/
4912static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
4913{
4914 struct ipr_resource_entry *res;
4915
4916 res = (struct ipr_resource_entry *)sdev->hostdata;
4917 if (res && ipr_is_gata(res)) {
4918 if (cmd == HDIO_GET_IDENTITY)
4919 return -ENOTTY;
4920 return ata_scsi_ioctl(sdev, cmd, arg);
4921 }
4922
4923 return -EINVAL;
4924}
4925
4926/**
4927 * ipr_ioa_info - Get information about the card/driver
4928 * @host: scsi host struct
4929 *
4930 * Return value:
4931 * pointer to buffer with description string
4932 **/
4933static const char * ipr_ioa_info(struct Scsi_Host *host)
4934{
4935 static char buffer[512];
4936 struct ipr_ioa_cfg *ioa_cfg;
4937 unsigned long lock_flags = 0;
4938
4939 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
4940
4941 spin_lock_irqsave(host->host_lock, lock_flags);
4942 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
4943 spin_unlock_irqrestore(host->host_lock, lock_flags);
4944
4945 return buffer;
4946}
4947
4948static struct scsi_host_template driver_template = {
4949 .module = THIS_MODULE,
4950 .name = "IPR",
4951 .info = ipr_ioa_info,
4952 .ioctl = ipr_ioctl,
4953 .queuecommand = ipr_queuecommand,
4954 .eh_abort_handler = ipr_eh_abort,
4955 .eh_device_reset_handler = ipr_eh_dev_reset,
4956 .eh_host_reset_handler = ipr_eh_host_reset,
4957 .slave_alloc = ipr_slave_alloc,
4958 .slave_configure = ipr_slave_configure,
4959 .slave_destroy = ipr_slave_destroy,
4960 .target_alloc = ipr_target_alloc,
4961 .target_destroy = ipr_target_destroy,
4962 .change_queue_depth = ipr_change_queue_depth,
4963 .change_queue_type = ipr_change_queue_type,
4964 .bios_param = ipr_biosparam,
4965 .can_queue = IPR_MAX_COMMANDS,
4966 .this_id = -1,
4967 .sg_tablesize = IPR_MAX_SGLIST,
4968 .max_sectors = IPR_IOA_MAX_SECTORS,
4969 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
4970 .use_clustering = ENABLE_CLUSTERING,
4971 .shost_attrs = ipr_ioa_attrs,
4972 .sdev_attrs = ipr_dev_attrs,
4973 .proc_name = IPR_NAME
4974};
4975
4976/**
4977 * ipr_ata_phy_reset - libata phy_reset handler
4978 * @ap: ata port to reset
4979 *
4980 **/
4981static void ipr_ata_phy_reset(struct ata_port *ap)
4982{
4983 unsigned long flags;
4984 struct ipr_sata_port *sata_port = ap->private_data;
4985 struct ipr_resource_entry *res = sata_port->res;
4986 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4987 int rc;
4988
4989 ENTER;
4990 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4991 while(ioa_cfg->in_reset_reload) {
4992 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
4993 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4994 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4995 }
4996
4997 if (!ioa_cfg->allow_cmds)
4998 goto out_unlock;
4999
5000 rc = ipr_device_reset(ioa_cfg, res);
5001
5002 if (rc) {
5003 ata_port_disable(ap);
5004 goto out_unlock;
5005 }
5006
5007 switch(res->cfgte.proto) {
5008 case IPR_PROTO_SATA:
5009 case IPR_PROTO_SAS_STP:
5010 ap->link.device[0].class = ATA_DEV_ATA;
5011 break;
5012 case IPR_PROTO_SATA_ATAPI:
5013 case IPR_PROTO_SAS_STP_ATAPI:
5014 ap->link.device[0].class = ATA_DEV_ATAPI;
5015 break;
5016 default:
5017 ap->link.device[0].class = ATA_DEV_UNKNOWN;
5018 ata_port_disable(ap);
5019 break;
5020 };
5021
5022out_unlock:
5023 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5024 LEAVE;
5025}
5026
5027/**
5028 * ipr_ata_post_internal - Cleanup after an internal command
5029 * @qc: ATA queued command
5030 *
5031 * Return value:
5032 * none
5033 **/
5034static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
5035{
5036 struct ipr_sata_port *sata_port = qc->ap->private_data;
5037 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5038 struct ipr_cmnd *ipr_cmd;
5039 unsigned long flags;
5040
5041 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5042 while(ioa_cfg->in_reset_reload) {
5043 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5044 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5045 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5046 }
5047
5048 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
5049 if (ipr_cmd->qc == qc) {
5050 ipr_device_reset(ioa_cfg, sata_port->res);
5051 break;
5052 }
5053 }
5054 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5055}
5056
5057/**
5058 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
5059 * @regs: destination
5060 * @tf: source ATA taskfile
5061 *
5062 * Return value:
5063 * none
5064 **/
5065static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
5066 struct ata_taskfile *tf)
5067{
5068 regs->feature = tf->feature;
5069 regs->nsect = tf->nsect;
5070 regs->lbal = tf->lbal;
5071 regs->lbam = tf->lbam;
5072 regs->lbah = tf->lbah;
5073 regs->device = tf->device;
5074 regs->command = tf->command;
5075 regs->hob_feature = tf->hob_feature;
5076 regs->hob_nsect = tf->hob_nsect;
5077 regs->hob_lbal = tf->hob_lbal;
5078 regs->hob_lbam = tf->hob_lbam;
5079 regs->hob_lbah = tf->hob_lbah;
5080 regs->ctl = tf->ctl;
5081}
5082
5083/**
5084 * ipr_sata_done - done function for SATA commands
5085 * @ipr_cmd: ipr command struct
5086 *
5087 * This function is invoked by the interrupt handler for
5088 * ops generated by the SCSI mid-layer to SATA devices
5089 *
5090 * Return value:
5091 * none
5092 **/
5093static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5094{
5095 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5096 struct ata_queued_cmd *qc = ipr_cmd->qc;
5097 struct ipr_sata_port *sata_port = qc->ap->private_data;
5098 struct ipr_resource_entry *res = sata_port->res;
5099 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5100
5101 memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
5102 sizeof(struct ipr_ioasa_gata));
5103 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5104
5105 if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
5106 scsi_report_device_reset(ioa_cfg->host, res->cfgte.res_addr.bus,
5107 res->cfgte.res_addr.target);
5108
5109 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5110 qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5111 else
5112 qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5113 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5114 ata_qc_complete(qc);
5115}
5116
5117/**
5118 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
5119 * @ipr_cmd: ipr command struct
5120 * @qc: ATA queued command
5121 *
5122 **/
5123static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
5124 struct ata_queued_cmd *qc)
5125{
5126 u32 ioadl_flags = 0;
5127 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5128 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5129 struct ipr_ioadl_desc *last_ioadl = NULL;
5130 int len = qc->nbytes;
5131 struct scatterlist *sg;
5132 unsigned int si;
5133
5134 if (len == 0)
5135 return;
5136
5137 if (qc->dma_dir == DMA_TO_DEVICE) {
5138 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5139 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5140 ioarcb->write_data_transfer_length = cpu_to_be32(len);
5141 ioarcb->write_ioadl_len =
5142 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5143 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
5144 ioadl_flags = IPR_IOADL_FLAGS_READ;
5145 ioarcb->read_data_transfer_length = cpu_to_be32(len);
5146 ioarcb->read_ioadl_len =
5147 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5148 }
5149
5150 for_each_sg(qc->sg, sg, qc->n_elem, si) {
5151 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5152 ioadl->address = cpu_to_be32(sg_dma_address(sg));
5153
5154 last_ioadl = ioadl;
5155 ioadl++;
5156 }
5157
5158 if (likely(last_ioadl))
5159 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5160}
5161
5162/**
5163 * ipr_qc_issue - Issue a SATA qc to a device
5164 * @qc: queued command
5165 *
5166 * Return value:
5167 * 0 if success
5168 **/
5169static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5170{
5171 struct ata_port *ap = qc->ap;
5172 struct ipr_sata_port *sata_port = ap->private_data;
5173 struct ipr_resource_entry *res = sata_port->res;
5174 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5175 struct ipr_cmnd *ipr_cmd;
5176 struct ipr_ioarcb *ioarcb;
5177 struct ipr_ioarcb_ata_regs *regs;
5178
5179 if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
5180 return AC_ERR_SYSTEM;
5181
5182 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5183 ioarcb = &ipr_cmd->ioarcb;
5184 regs = &ioarcb->add_data.u.regs;
5185
5186 memset(&ioarcb->add_data, 0, sizeof(ioarcb->add_data));
5187 ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(ioarcb->add_data.u.regs));
5188
5189 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5190 ipr_cmd->qc = qc;
5191 ipr_cmd->done = ipr_sata_done;
5192 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
5193 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
5194 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5195 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5196 ipr_cmd->dma_use_sg = qc->n_elem;
5197
5198 ipr_build_ata_ioadl(ipr_cmd, qc);
5199 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5200 ipr_copy_sata_tf(regs, &qc->tf);
5201 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
5202 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
5203
5204 switch (qc->tf.protocol) {
5205 case ATA_PROT_NODATA:
5206 case ATA_PROT_PIO:
5207 break;
5208
5209 case ATA_PROT_DMA:
5210 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
5211 break;
5212
5213 case ATAPI_PROT_PIO:
5214 case ATAPI_PROT_NODATA:
5215 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
5216 break;
5217
0dc36888 5218 case ATAPI_PROT_DMA:
5219 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
5220 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
5221 break;
5222
5223 default:
5224 WARN_ON(1);
0feeed82 5225 return AC_ERR_INVALID;
5226 }
5227
5228 mb();
5229 writel(be32_to_cpu(ioarcb->ioarcb_host_pci_addr),
5230 ioa_cfg->regs.ioarrin_reg);
5231 return 0;
5232}
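
/*
 * Illustrative note (not driver code): the mb() above orders the IOARCB and
 * IOADL stores ahead of the MMIO doorbell write, so the adapter can never
 * fetch a half-built command. The general shape of the pattern, with
 * hypothetical names, is:
 */
#if 0
	build_command_in_dma_memory(cmd);	/* plain memory stores */
	mb();					/* order stores before MMIO */
	writel(cmd_bus_addr, doorbell_reg);	/* adapter may fetch now */
#endif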
5233
5234/**
5235 * ipr_qc_fill_rtf - Read result TF
5236 * @qc: ATA queued command
5237 *
5238 * Return value:
5239 * true
5240 **/
5241static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
5242{
5243 struct ipr_sata_port *sata_port = qc->ap->private_data;
5244 struct ipr_ioasa_gata *g = &sata_port->ioasa;
5245 struct ata_taskfile *tf = &qc->result_tf;
5246
5247 tf->feature = g->error;
5248 tf->nsect = g->nsect;
5249 tf->lbal = g->lbal;
5250 tf->lbam = g->lbam;
5251 tf->lbah = g->lbah;
5252 tf->device = g->device;
5253 tf->command = g->status;
5254 tf->hob_nsect = g->hob_nsect;
5255 tf->hob_lbal = g->hob_lbal;
5256 tf->hob_lbam = g->hob_lbam;
5257 tf->hob_lbah = g->hob_lbah;
5258 tf->ctl = g->alt_status;
5259
5260 return true;
5261}
5262
35a39691 5263static struct ata_port_operations ipr_sata_ops = {
35a39691 5264 .phy_reset = ipr_ata_phy_reset,
a1efdaba 5265 .hardreset = ipr_sata_reset,
35a39691 5266 .post_internal_cmd = ipr_ata_post_internal,
5267 .qc_prep = ata_noop_qc_prep,
5268 .qc_issue = ipr_qc_issue,
4c9bf4e7 5269 .qc_fill_rtf = ipr_qc_fill_rtf,
5270 .port_start = ata_sas_port_start,
5271 .port_stop = ata_sas_port_stop
5272};
5273
5274static struct ata_port_info sata_port_info = {
5275 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
5276 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
5277 .pio_mask = 0x10, /* pio4 */
5278 .mwdma_mask = 0x07,
5279 .udma_mask = 0x7f, /* udma0-6 */
5280 .port_ops = &ipr_sata_ops
5281};
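
/*
 * Illustrative sketch (not driver code): the transfer-mode masks above are
 * bitmaps in which bit N means "mode N supported", so 0x10 is PIO4 only and
 * 0x7f is UDMA0 through UDMA6. A stand-alone decoder:
 */
#if 0
#include <stdio.h>

static void ex_print_modes(const char *name, unsigned int mask)
{
	int mode;

	for (mode = 0; mask; mask >>= 1, mode++)
		if (mask & 1)
			printf("%s%d\n", name, mode);
}

int main(void)
{
	ex_print_modes("PIO", 0x10);	/* prints PIO4 */
	ex_print_modes("UDMA", 0x7f);	/* prints UDMA0..UDMA6 */
	return 0;
}
#endif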
5282
5283#ifdef CONFIG_PPC_PSERIES
5284static const u16 ipr_blocked_processors[] = {
5285 PV_NORTHSTAR,
5286 PV_PULSAR,
5287 PV_POWER4,
5288 PV_ICESTAR,
5289 PV_SSTAR,
5290 PV_POWER4p,
5291 PV_630,
5292 PV_630p
5293};
5294
5295/**
5296 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
5297 * @ioa_cfg: ioa cfg struct
5298 *
5299 * Adapters that use Gemstone revision < 3.1 do not work reliably on
5300 * certain pSeries hardware. This function determines if the given
5301 * adapter is in one of these configurations or not.
5302 *
5303 * Return value:
5304 * 1 if adapter is not supported / 0 if adapter is supported
5305 **/
5306static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
5307{
5308 int i;
5309
5310 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
5311 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
5312 if (__is_processor(ipr_blocked_processors[i]))
5313 return 1;
5314 }
5315 }
5316 return 0;
5317}
5318#else
5319#define ipr_invalid_adapter(ioa_cfg) 0
5320#endif
5321
5322/**
5323 * ipr_ioa_bringdown_done - IOA bring down completion.
5324 * @ipr_cmd: ipr command struct
5325 *
5326 * This function processes the completion of an adapter bring down.
5327 * It wakes any reset sleepers.
5328 *
5329 * Return value:
5330 * IPR_RC_JOB_RETURN
5331 **/
5332static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
5333{
5334 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5335
5336 ENTER;
5337 ioa_cfg->in_reset_reload = 0;
5338 ioa_cfg->reset_retries = 0;
5339 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5340 wake_up_all(&ioa_cfg->reset_wait_q);
5341
5342 spin_unlock_irq(ioa_cfg->host->host_lock);
5343 scsi_unblock_requests(ioa_cfg->host);
5344 spin_lock_irq(ioa_cfg->host->host_lock);
5345 LEAVE;
5346
5347 return IPR_RC_JOB_RETURN;
5348}
5349
5350/**
5351 * ipr_ioa_reset_done - IOA reset completion.
5352 * @ipr_cmd: ipr command struct
5353 *
5354 * This function processes the completion of an adapter reset.
5355 * It schedules any necessary mid-layer add/removes and
5356 * wakes any reset sleepers.
5357 *
5358 * Return value:
5359 * IPR_RC_JOB_RETURN
5360 **/
5361static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
5362{
5363 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5364 struct ipr_resource_entry *res;
5365 struct ipr_hostrcb *hostrcb, *temp;
5366 int i = 0;
5367
5368 ENTER;
5369 ioa_cfg->in_reset_reload = 0;
5370 ioa_cfg->allow_cmds = 1;
5371 ioa_cfg->reset_cmd = NULL;
3d1d0da6 5372 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
5373
5374 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5375 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
5376 ipr_trace;
5377 break;
5378 }
5379 }
5380 schedule_work(&ioa_cfg->work_q);
5381
5382 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
5383 list_del(&hostrcb->queue);
5384 if (i++ < IPR_NUM_LOG_HCAMS)
5385 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
5386 else
5387 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
5388 }
5389
6bb04170 5390 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
5391 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
5392
5393 ioa_cfg->reset_retries = 0;
5394 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5395 wake_up_all(&ioa_cfg->reset_wait_q);
5396
5397 spin_unlock_irq(ioa_cfg->host->host_lock);
5398 scsi_unblock_requests(ioa_cfg->host);
5399 spin_lock_irq(ioa_cfg->host->host_lock);
5400
5401 if (!ioa_cfg->allow_cmds)
5402 scsi_block_requests(ioa_cfg->host);
5403
5404 LEAVE;
5405 return IPR_RC_JOB_RETURN;
5406}
5407
5408/**
5409 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
5410 * @supported_dev: supported device struct
5411 * @vpids: vendor product id struct
5412 *
5413 * Return value:
5414 * none
5415 **/
5416static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
5417 struct ipr_std_inq_vpids *vpids)
5418{
5419 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
5420 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
5421 supported_dev->num_records = 1;
5422 supported_dev->data_length =
5423 cpu_to_be16(sizeof(struct ipr_supported_device));
5424 supported_dev->reserved = 0;
5425}
5426
5427/**
5428 * ipr_set_supported_devs - Send Set Supported Devices for a device
5429 * @ipr_cmd: ipr command struct
5430 *
5431 * This function sends a Set Supported Devices command to the adapter
5432 *
5433 * Return value:
5434 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5435 **/
5436static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
5437{
5438 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5439 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
5440 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5441 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5442 struct ipr_resource_entry *res = ipr_cmd->u.res;
5443
5444 ipr_cmd->job_step = ipr_ioa_reset_done;
5445
5446 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
e4fbf44e 5447 if (!ipr_is_scsi_disk(res))
5448 continue;
5449
5450 ipr_cmd->u.res = res;
5451 ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
5452
5453 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5454 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5455 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5456
5457 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
5458 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
5459 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
5460
5461 ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
5462 sizeof(struct ipr_supported_device));
5463 ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
5464 offsetof(struct ipr_misc_cbs, supp_dev));
5465 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5466 ioarcb->write_data_transfer_length =
5467 cpu_to_be32(sizeof(struct ipr_supported_device));
5468
5469 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5470 IPR_SET_SUP_DEVICE_TIMEOUT);
5471
5472 ipr_cmd->job_step = ipr_set_supported_devs;
5473 return IPR_RC_JOB_RETURN;
5474 }
5475
5476 return IPR_RC_JOB_CONTINUE;
5477}
5478
62275040 5479/**
5480 * ipr_setup_write_cache - Disable write cache if needed
5481 * @ipr_cmd: ipr command struct
5482 *
5483 * This function sets up the adapter's write cache to the desired setting
5484 *
5485 * Return value:
5486 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5487 **/
5488static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
5489{
5490 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5491
5492 ipr_cmd->job_step = ipr_set_supported_devs;
5493 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
5494 struct ipr_resource_entry, queue);
5495
5496 if (ioa_cfg->cache_state != CACHE_DISABLED)
5497 return IPR_RC_JOB_CONTINUE;
5498
5499 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5500 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5501 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5502 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
5503
5504 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5505
5506 return IPR_RC_JOB_RETURN;
5507}
5508
5509/**
5510 * ipr_get_mode_page - Locate specified mode page
5511 * @mode_pages: mode page buffer
5512 * @page_code: page code to find
5513 * @len: minimum required length for mode page
5514 *
5515 * Return value:
5516 * pointer to mode page / NULL on failure
5517 **/
5518static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
5519 u32 page_code, u32 len)
5520{
5521 struct ipr_mode_page_hdr *mode_hdr;
5522 u32 page_length;
5523 u32 length;
5524
5525 if (!mode_pages || (mode_pages->hdr.length == 0))
5526 return NULL;
5527
5528 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
5529 mode_hdr = (struct ipr_mode_page_hdr *)
5530 (mode_pages->data + mode_pages->hdr.block_desc_len);
5531
5532 while (length) {
5533 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
5534 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
5535 return mode_hdr;
5536 break;
5537 } else {
5538 page_length = (sizeof(struct ipr_mode_page_hdr) +
5539 mode_hdr->page_length);
5540 length -= page_length;
5541 mode_hdr = (struct ipr_mode_page_hdr *)
5542 ((unsigned long)mode_hdr + page_length);
5543 }
5544 }
5545 return NULL;
5546}
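
/*
 * Illustrative sketch (not driver code): ipr_get_mode_page() is a classic
 * variable-length record walk - each header states its own payload length
 * and the cursor advances by header plus payload until the page code
 * matches or the buffer is exhausted. A generic user-space model with
 * made-up types:
 */
#if 0
#include <stddef.h>
#include <stdint.h>

struct ex_rec_hdr {
	uint8_t code;
	uint8_t len;		/* payload bytes following this header */
};

static struct ex_rec_hdr *ex_find_rec(uint8_t *buf, size_t buf_len,
				      uint8_t code)
{
	size_t off = 0;

	while (off + sizeof(struct ex_rec_hdr) <= buf_len) {
		struct ex_rec_hdr *hdr = (struct ex_rec_hdr *)(buf + off);

		if (hdr->code == code)
			return hdr;
		off += sizeof(*hdr) + hdr->len;	/* skip to next record */
	}
	return NULL;	/* record code not present */
}
#endif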
5547
5548/**
5549 * ipr_check_term_power - Check for term power errors
5550 * @ioa_cfg: ioa config struct
5551 * @mode_pages: IOAFP mode pages buffer
5552 *
5553 * Check the IOAFP's mode page 28 for term power errors
5554 *
5555 * Return value:
5556 * nothing
5557 **/
5558static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
5559 struct ipr_mode_pages *mode_pages)
5560{
5561 int i;
5562 int entry_length;
5563 struct ipr_dev_bus_entry *bus;
5564 struct ipr_mode_page28 *mode_page;
5565
5566 mode_page = ipr_get_mode_page(mode_pages, 0x28,
5567 sizeof(struct ipr_mode_page28));
5568
5569 entry_length = mode_page->entry_length;
5570
5571 bus = mode_page->bus;
5572
5573 for (i = 0; i < mode_page->num_entries; i++) {
5574 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
5575 dev_err(&ioa_cfg->pdev->dev,
5576 "Term power is absent on scsi bus %d\n",
5577 bus->res_addr.bus);
5578 }
5579
5580 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
5581 }
5582}
5583
5584/**
5585 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
5586 * @ioa_cfg: ioa config struct
5587 *
5588 * Looks through the config table checking for SES devices. If
5589 * the SES device is in the SES table indicating a maximum SCSI
5590 * bus speed, the speed is limited for the bus.
5591 *
5592 * Return value:
5593 * none
5594 **/
5595static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
5596{
5597 u32 max_xfer_rate;
5598 int i;
5599
5600 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
5601 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
5602 ioa_cfg->bus_attr[i].bus_width);
5603
5604 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
5605 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
5606 }
5607}
5608
5609/**
5610 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
5611 * @ioa_cfg: ioa config struct
5612 * @mode_pages: mode page 28 buffer
5613 *
5614 * Updates mode page 28 based on driver configuration
5615 *
5616 * Return value:
5617 * none
5618 **/
5619static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
5620 struct ipr_mode_pages *mode_pages)
5621{
5622 int i, entry_length;
5623 struct ipr_dev_bus_entry *bus;
5624 struct ipr_bus_attributes *bus_attr;
5625 struct ipr_mode_page28 *mode_page;
5626
5627 mode_page = ipr_get_mode_page(mode_pages, 0x28,
5628 sizeof(struct ipr_mode_page28));
5629
5630 entry_length = mode_page->entry_length;
5631
5632 /* Loop for each device bus entry */
5633 for (i = 0, bus = mode_page->bus;
5634 i < mode_page->num_entries;
5635 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
5636 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
5637 dev_err(&ioa_cfg->pdev->dev,
5638 "Invalid resource address reported: 0x%08X\n",
5639 IPR_GET_PHYS_LOC(bus->res_addr));
5640 continue;
5641 }
5642
5643 bus_attr = &ioa_cfg->bus_attr[i];
5644 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
5645 bus->bus_width = bus_attr->bus_width;
5646 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
5647 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
5648 if (bus_attr->qas_enabled)
5649 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
5650 else
5651 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
5652 }
5653}
5654
5655/**
5656 * ipr_build_mode_select - Build a mode select command
5657 * @ipr_cmd: ipr command struct
5658 * @res_handle: resource handle to send command to
5659 * @parm: Byte 2 of Mode Sense command
5660 * @dma_addr: DMA buffer address
5661 * @xfer_len: data transfer length
5662 *
5663 * Return value:
5664 * none
5665 **/
5666static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
5667 __be32 res_handle, u8 parm, u32 dma_addr,
5668 u8 xfer_len)
5669{
5670 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5671 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5672
5673 ioarcb->res_handle = res_handle;
5674 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5675 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5676 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
5677 ioarcb->cmd_pkt.cdb[1] = parm;
5678 ioarcb->cmd_pkt.cdb[4] = xfer_len;
5679
5680 ioadl->flags_and_data_len =
5681 cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
5682 ioadl->address = cpu_to_be32(dma_addr);
5683 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5684 ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
5685}
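
/*
 * Illustrative sketch (not driver code): the function above fills a 6-byte
 * SCSI MODE SELECT CDB - opcode in byte 0, flags in byte 1 (callers here
 * pass 0x11, i.e. PF | SP), parameter list length in byte 4. Stand-alone,
 * the packing is just:
 */
#if 0
#include <stdint.h>
#include <string.h>

#define EX_MODE_SELECT	0x15	/* SCSI MODE SELECT(6) opcode */

static void ex_build_mode_select_cdb(uint8_t cdb[6], uint8_t parm,
				     uint8_t xfer_len)
{
	memset(cdb, 0, 6);
	cdb[0] = EX_MODE_SELECT;
	cdb[1] = parm;		/* e.g. 0x11 = PF | SP */
	cdb[4] = xfer_len;	/* parameter list length */
}
#endif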
5686
5687/**
5688 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
5689 * @ipr_cmd: ipr command struct
5690 *
5691 * This function sets up the SCSI bus attributes and sends
5692 * a Mode Select for Page 28 to activate them.
5693 *
5694 * Return value:
5695 * IPR_RC_JOB_RETURN
5696 **/
5697static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
5698{
5699 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5700 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
5701 int length;
5702
5703 ENTER;
5704 ipr_scsi_bus_speed_limit(ioa_cfg);
5705 ipr_check_term_power(ioa_cfg, mode_pages);
5706 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
5707 length = mode_pages->hdr.length + 1;
5708 mode_pages->hdr.length = 0;
5709
5710 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
5711 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
5712 length);
5713
62275040 5714 ipr_cmd->job_step = ipr_setup_write_cache;
5715 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5716
5717 LEAVE;
5718 return IPR_RC_JOB_RETURN;
5719}
5720
5721/**
5722 * ipr_build_mode_sense - Builds a mode sense command
5723 * @ipr_cmd: ipr command struct
5724 * @res_handle: resource handle to send command to
5725 * @parm: Byte 2 of mode sense command
5726 * @dma_addr: DMA address of mode sense buffer
5727 * @xfer_len: Size of DMA buffer
5728 *
5729 * Return value:
5730 * none
5731 **/
5732static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
5733 __be32 res_handle,
5734 u8 parm, u32 dma_addr, u8 xfer_len)
5735{
5736 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5737 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5738
5739 ioarcb->res_handle = res_handle;
5740 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
5741 ioarcb->cmd_pkt.cdb[2] = parm;
5742 ioarcb->cmd_pkt.cdb[4] = xfer_len;
5743 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5744
5745 ioadl->flags_and_data_len =
5746 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5747 ioadl->address = cpu_to_be32(dma_addr);
5748 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5749 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5750}
5751
dfed823e 5752/**
5753 * ipr_reset_cmd_failed - Handle failure of IOA reset command
5754 * @ipr_cmd: ipr command struct
5755 *
5756 * This function handles the failure of an IOA bringup command.
5757 *
5758 * Return value:
5759 * IPR_RC_JOB_RETURN
5760 **/
5761static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
5762{
5763 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5764 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5765
5766 dev_err(&ioa_cfg->pdev->dev,
5767 "0x%02X failed with IOASC: 0x%08X\n",
5768 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
5769
5770 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5771 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5772 return IPR_RC_JOB_RETURN;
5773}
5774
5775/**
5776 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
5777 * @ipr_cmd: ipr command struct
5778 *
5779 * This function handles the failure of a Mode Sense to the IOAFP.
5780 * Some adapters do not handle all mode pages.
5781 *
5782 * Return value:
5783 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5784 **/
5785static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
5786{
5787 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5788
5789 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
5790 ipr_cmd->job_step = ipr_setup_write_cache;
5791 return IPR_RC_JOB_CONTINUE;
5792 }
5793
5794 return ipr_reset_cmd_failed(ipr_cmd);
5795}
5796
5797/**
5798 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
5799 * @ipr_cmd: ipr command struct
5800 *
5801 * This function sends a Page 28 mode sense to the IOA to
5802 * retrieve SCSI bus attributes.
5803 *
5804 * Return value:
5805 * IPR_RC_JOB_RETURN
5806 **/
5807static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
5808{
5809 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5810
5811 ENTER;
5812 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
5813 0x28, ioa_cfg->vpd_cbs_dma +
5814 offsetof(struct ipr_misc_cbs, mode_pages),
5815 sizeof(struct ipr_mode_pages));
5816
5817 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
dfed823e 5818 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
5819
5820 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5821
5822 LEAVE;
5823 return IPR_RC_JOB_RETURN;
5824}
5825
5826/**
5827 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
5828 * @ipr_cmd: ipr command struct
5829 *
5830 * This function enables dual IOA RAID support if possible.
5831 *
5832 * Return value:
5833 * IPR_RC_JOB_RETURN
5834 **/
5835static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
5836{
5837 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5838 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
5839 struct ipr_mode_page24 *mode_page;
5840 int length;
5841
5842 ENTER;
5843 mode_page = ipr_get_mode_page(mode_pages, 0x24,
5844 sizeof(struct ipr_mode_page24));
5845
5846 if (mode_page)
5847 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
5848
5849 length = mode_pages->hdr.length + 1;
5850 mode_pages->hdr.length = 0;
5851
5852 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
5853 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
5854 length);
5855
5856 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
5857 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5858
5859 LEAVE;
5860 return IPR_RC_JOB_RETURN;
5861}
5862
5863/**
5864 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
5865 * @ipr_cmd: ipr command struct
5866 *
5867 * This function handles the failure of a Mode Sense to the IOAFP.
5868 * Some adapters do not handle all mode pages.
5869 *
5870 * Return value:
5871 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5872 **/
5873static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
5874{
5875 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5876
5877 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
5878 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
5879 return IPR_RC_JOB_CONTINUE;
5880 }
5881
5882 return ipr_reset_cmd_failed(ipr_cmd);
5883}
5884
5885/**
5886 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
5887 * @ipr_cmd: ipr command struct
5888 *
5889 * This function sends a mode sense to the IOA to retrieve
5890 * the IOA Advanced Function Control mode page.
5891 *
5892 * Return value:
5893 * IPR_RC_JOB_RETURN
5894 **/
5895static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
5896{
5897 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5898
5899 ENTER;
5900 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
5901 0x24, ioa_cfg->vpd_cbs_dma +
5902 offsetof(struct ipr_misc_cbs, mode_pages),
5903 sizeof(struct ipr_mode_pages));
5904
5905 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
5906 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
5907
5908 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5909
5910 LEAVE;
5911 return IPR_RC_JOB_RETURN;
5912}
5913
5914/**
5915 * ipr_init_res_table - Initialize the resource table
5916 * @ipr_cmd: ipr command struct
5917 *
5918 * This function looks through the existing resource table, comparing
5919 * it with the config table. This function will take care of old/new
5920 * devices and schedule adding/removing them from the mid-layer
5921 * as appropriate.
5922 *
5923 * Return value:
5924 * IPR_RC_JOB_CONTINUE
5925 **/
5926static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
5927{
5928 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5929 struct ipr_resource_entry *res, *temp;
5930 struct ipr_config_table_entry *cfgte;
5931 int found, i;
5932 LIST_HEAD(old_res);
5933
5934 ENTER;
5935 if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
5936 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
5937
5938 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
5939 list_move_tail(&res->queue, &old_res);
5940
5941 for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
5942 cfgte = &ioa_cfg->cfg_table->dev[i];
5943 found = 0;
5944
5945 list_for_each_entry_safe(res, temp, &old_res, queue) {
5946 if (!memcmp(&res->cfgte.res_addr,
5947 &cfgte->res_addr, sizeof(cfgte->res_addr))) {
5948 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5949 found = 1;
5950 break;
5951 }
5952 }
5953
5954 if (!found) {
5955 if (list_empty(&ioa_cfg->free_res_q)) {
5956 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
5957 break;
5958 }
5959
5960 found = 1;
5961 res = list_entry(ioa_cfg->free_res_q.next,
5962 struct ipr_resource_entry, queue);
5963 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5964 ipr_init_res_entry(res);
5965 res->add_to_ml = 1;
5966 }
5967
5968 if (found)
5969 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
5970 }
5971
5972 list_for_each_entry_safe(res, temp, &old_res, queue) {
5973 if (res->sdev) {
5974 res->del_from_ml = 1;
1121b794 5975 res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
1da177e4
LT
5976 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5977 } else {
5978 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
5979 }
5980 }
5981
5982 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
5983 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
5984 else
5985 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
5986
5987 LEAVE;
5988 return IPR_RC_JOB_CONTINUE;
5989}
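
/*
 * Illustrative sketch (not driver code): ipr_init_res_table() uses a common
 * reconciliation pattern - park all known entries on a scratch list, walk
 * the fresh config table re-claiming matches, treat misses as new devices,
 * and whatever is still parked afterwards has been removed. A tiny runnable
 * model of the same diff over plain arrays:
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

static void ex_reconcile(const int *old, int n_old, const int *snap,
			 int n_snap)
{
	bool claimed[16] = { false };	/* assumes n_old <= 16 */
	int i, j;

	for (i = 0; i < n_snap; i++) {
		bool found = false;

		for (j = 0; j < n_old; j++) {
			if (!claimed[j] && old[j] == snap[i]) {
				claimed[j] = true;
				found = true;
				break;
			}
		}
		printf("%d: %s\n", snap[i], found ? "kept" : "added");
	}
	for (j = 0; j < n_old; j++)
		if (!claimed[j])	/* never re-claimed: entry is gone */
			printf("%d: removed\n", old[j]);
}

int main(void)
{
	int old[] = { 1, 2, 3 }, snap[] = { 2, 3, 4 };

	ex_reconcile(old, 3, snap, 3);
	return 0;
}
#endif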
5990
5991/**
5992 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
5993 * @ipr_cmd: ipr command struct
5994 *
5995 * This function sends a Query IOA Configuration command
5996 * to the adapter to retrieve the IOA configuration table.
5997 *
5998 * Return value:
5999 * IPR_RC_JOB_RETURN
6000 **/
6001static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
6002{
6003 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6004 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6005 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
6006 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
ac09c349 6007 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6008
6009 ENTER;
6010 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
6011 ioa_cfg->dual_raid = 1;
6012 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
6013 ucode_vpd->major_release, ucode_vpd->card_type,
6014 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
6015 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6016 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6017
6018 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
6019 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
6020 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
6021
6022 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
6023 ioarcb->read_data_transfer_length =
6024 cpu_to_be32(sizeof(struct ipr_config_table));
6025
6026 ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
6027 ioadl->flags_and_data_len =
6028 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
6029
6030 ipr_cmd->job_step = ipr_init_res_table;
6031
6032 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6033
6034 LEAVE;
6035 return IPR_RC_JOB_RETURN;
6036}
6037
6038/**
6039 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
6040 * @ipr_cmd:	ipr command struct
 * @flags:	inquiry flags byte (CDB byte 1)
 * @page:	inquiry page code
 * @dma_addr:	DMA address of the inquiry buffer
 * @xfer_len:	transfer length
6041 *
6042 * This utility function sends an inquiry to the adapter.
6043 *
6044 * Return value:
6045 * none
6046 **/
6047static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
6048 u32 dma_addr, u8 xfer_len)
6049{
6050 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6051 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
6052
6053 ENTER;
6054 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6055 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6056
6057 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
6058 ioarcb->cmd_pkt.cdb[1] = flags;
6059 ioarcb->cmd_pkt.cdb[2] = page;
6060 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6061
6062 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
6063 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
6064
6065 ioadl->address = cpu_to_be32(dma_addr);
6066 ioadl->flags_and_data_len =
6067 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
6068
6069 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6070 LEAVE;
6071}
6072
62275040 6073/**
6074 * ipr_inquiry_page_supported - Is the given inquiry page supported
6075 * @page0: inquiry page 0 buffer
6076 * @page: page code.
6077 *
6078 * This function determines if the specified inquiry page is supported.
6079 *
6080 * Return value:
6081 * 1 if page is supported / 0 if not
6082 **/
6083static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
6084{
6085 int i;
6086
6087 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
6088 if (page0->page[i] == page)
6089 return 1;
6090
6091 return 0;
6092}
6093
6094/**
6095 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
6096 * @ipr_cmd: ipr command struct
6097 *
6098 * This function sends a Page 0xD0 inquiry to the adapter
6099 * to retrieve adapter capabilities.
6100 *
6101 * Return value:
6102 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6103 **/
6104static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
6105{
6106 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6107 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
6108 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6109
6110 ENTER;
6111 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
6112 memset(cap, 0, sizeof(*cap));
6113
6114 if (ipr_inquiry_page_supported(page0, 0xD0)) {
6115 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
6116 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
6117 sizeof(struct ipr_inquiry_cap));
6118 return IPR_RC_JOB_RETURN;
6119 }
6120
6121 LEAVE;
6122 return IPR_RC_JOB_CONTINUE;
6123}
6124
6125/**
6126 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
6127 * @ipr_cmd: ipr command struct
6128 *
6129 * This function sends a Page 3 inquiry to the adapter
6130 * to retrieve software VPD information.
6131 *
6132 * Return value:
6133 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6134 **/
6135static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
62275040 6136{
6137 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6138 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
6139
6140 ENTER;
6141
6142 if (!ipr_inquiry_page_supported(page0, 1))
6143 ioa_cfg->cache_state = CACHE_NONE;
6144
ac09c349 6145 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
62275040 6146
6147 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
6148 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
6149 sizeof(struct ipr_inquiry_page3));
6150
6151 LEAVE;
6152 return IPR_RC_JOB_RETURN;
6153}
6154
6155/**
6156 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
6157 * @ipr_cmd: ipr command struct
6158 *
6159 * This function sends a Page 0 inquiry to the adapter
6160 * to retrieve supported inquiry pages.
6161 *
6162 * Return value:
6163 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6164 **/
6165static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
6166{
6167 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6168 char type[5];
6169
6170 ENTER;
6171
6172 /* Grab the type out of the VPD and store it away */
6173 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
6174 type[4] = '\0';
6175 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
6176
62275040 6177 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
1da177e4 6178
62275040 6179 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
6180 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
6181 sizeof(struct ipr_inquiry_page0));
6182
6183 LEAVE;
6184 return IPR_RC_JOB_RETURN;
6185}
6186
6187/**
6188 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
6189 * @ipr_cmd: ipr command struct
6190 *
6191 * This function sends a standard inquiry to the adapter.
6192 *
6193 * Return value:
6194 * IPR_RC_JOB_RETURN
6195 **/
6196static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
6197{
6198 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6199
6200 ENTER;
62275040 6201 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
6202
6203 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
6204 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
6205 sizeof(struct ipr_ioa_vpd));
6206
6207 LEAVE;
6208 return IPR_RC_JOB_RETURN;
6209}
6210
6211/**
6212 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
6213 * @ipr_cmd: ipr command struct
6214 *
6215 * This function sends an Identify Host Request Response Queue
6216 * command to establish the HRRQ with the adapter.
6217 *
6218 * Return value:
6219 * IPR_RC_JOB_RETURN
6220 **/
6221static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
6222{
6223 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6224 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6225
6226 ENTER;
6227 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
6228
6229 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
6230 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6231
6232 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6233 ioarcb->cmd_pkt.cdb[2] =
6234 ((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
6235 ioarcb->cmd_pkt.cdb[3] =
6236 ((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
6237 ioarcb->cmd_pkt.cdb[4] =
6238 ((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
6239 ioarcb->cmd_pkt.cdb[5] =
6240 ((u32) ioa_cfg->host_rrq_dma) & 0xff;
6241 ioarcb->cmd_pkt.cdb[7] =
6242 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
6243 ioarcb->cmd_pkt.cdb[8] =
6244 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
6245
6246 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
6247
6248 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6249
6250 LEAVE;
6251 return IPR_RC_JOB_RETURN;
6252}
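
/*
 * Illustrative sketch (not driver code): the Identify HRRQ CDB above
 * hand-packs a 32-bit bus address big-endian into CDB bytes 2-5 and a
 * 16-bit byte count into bytes 7-8. The same packing, stand-alone:
 */
#if 0
#include <stdint.h>

static void ex_pack_hrrq_cdb(uint8_t cdb[16], uint32_t addr, uint16_t len)
{
	cdb[2] = (addr >> 24) & 0xff;	/* address, most significant first */
	cdb[3] = (addr >> 16) & 0xff;
	cdb[4] = (addr >> 8) & 0xff;
	cdb[5] = addr & 0xff;
	cdb[7] = (len >> 8) & 0xff;	/* length, big-endian */
	cdb[8] = len & 0xff;
}
#endif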
6253
6254/**
6255 * ipr_reset_timer_done - Adapter reset timer function
6256 * @ipr_cmd: ipr command struct
6257 *
6258 * Description: This function is used in adapter reset processing
6259 * for timing events. If the reset_cmd pointer in the IOA
6260 * config struct is not this adapter's we are doing nested
6261 * resets and fail_all_ops will take care of freeing the
6262 * command block.
6263 *
6264 * Return value:
6265 * none
6266 **/
6267static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
6268{
6269 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6270 unsigned long lock_flags = 0;
6271
6272 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6273
6274 if (ioa_cfg->reset_cmd == ipr_cmd) {
6275 list_del(&ipr_cmd->queue);
6276 ipr_cmd->done(ipr_cmd);
6277 }
6278
6279 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6280}
6281
6282/**
6283 * ipr_reset_start_timer - Start a timer for adapter reset job
6284 * @ipr_cmd: ipr command struct
6285 * @timeout: timeout value
6286 *
6287 * Description: This function is used in adapter reset processing
6288 * for timing events. If the reset_cmd pointer in the IOA
6289 * config struct no longer points to this command, we are doing nested
6290 * resets and fail_all_ops will take care of freeing the
6291 * command block.
6292 *
6293 * Return value:
6294 * none
6295 **/
6296static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
6297 unsigned long timeout)
6298{
6299 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
6300 ipr_cmd->done = ipr_reset_ioa_job;
6301
6302 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
6303 ipr_cmd->timer.expires = jiffies + timeout;
6304 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
6305 add_timer(&ipr_cmd->timer);
6306}
6307
6308/**
6309 * ipr_init_ioa_mem - Initialize ioa_cfg control block
6310 * @ioa_cfg: ioa cfg struct
6311 *
6312 * Return value:
6313 * nothing
6314 **/
6315static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
6316{
6317 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
6318
6319 /* Initialize Host RRQ pointers */
6320 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
6321 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
6322 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
6323 ioa_cfg->toggle_bit = 1;
6324
6325 /* Zero out config table */
6326 memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
6327}
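
/*
 * Illustrative sketch (not driver code): the host RRQ set up above is a
 * ring the adapter fills and the host drains; a toggle bit tracks
 * ownership, and the consumer flips its expected value on every wrap so
 * stale entries from the previous lap are never mistaken for new ones. A
 * minimal consumer loop with invented names:
 */
#if 0
#include <stdint.h>

#define EX_TOGGLE_BIT	0x1u

/* drain new entries; *curr and *toggle persist between polls */
static int ex_drain_ring(volatile uint32_t *ring, int n, int *curr,
			 uint32_t *toggle)
{
	int consumed = 0;

	while ((ring[*curr] & EX_TOGGLE_BIT) == *toggle) {
		/* ...process ring[*curr] here... */
		consumed++;
		if (++*curr == n) {	/* wrapped: flip expected bit */
			*curr = 0;
			*toggle ^= EX_TOGGLE_BIT;
		}
	}
	return consumed;
}
#endif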
6328
6329/**
6330 * ipr_reset_enable_ioa - Enable the IOA following a reset.
6331 * @ipr_cmd: ipr command struct
6332 *
6333 * This function reinitializes some control blocks and
6334 * enables destructive diagnostics on the adapter.
6335 *
6336 * Return value:
6337 * IPR_RC_JOB_RETURN
6338 **/
6339static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
6340{
6341 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6342 volatile u32 int_reg;
6343
6344 ENTER;
6345	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
6346 ipr_init_ioa_mem(ioa_cfg);
6347
6348 ioa_cfg->allow_interrupts = 1;
6349 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
6350
6351 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
6352 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
6353 ioa_cfg->regs.clr_interrupt_mask_reg);
6354 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6355 return IPR_RC_JOB_CONTINUE;
6356 }
6357
6358 /* Enable destructive diagnostics on IOA */
3d1d0da6 6359 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);
6360
6361 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
6362 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6363
6364 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
6365
6366 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5469cb5b 6367 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
6368 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
6369 ipr_cmd->done = ipr_reset_ioa_job;
6370 add_timer(&ipr_cmd->timer);
6371 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
6372
6373 LEAVE;
6374 return IPR_RC_JOB_RETURN;
6375}
6376
6377/**
6378 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
6379 * @ipr_cmd: ipr command struct
6380 *
6381 * This function is invoked when an adapter dump has run out
6382 * of processing time.
6383 *
6384 * Return value:
6385 * IPR_RC_JOB_CONTINUE
6386 **/
6387static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
6388{
6389 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6390
6391 if (ioa_cfg->sdt_state == GET_DUMP)
6392 ioa_cfg->sdt_state = ABORT_DUMP;
6393
6394 ipr_cmd->job_step = ipr_reset_alert;
6395
6396 return IPR_RC_JOB_CONTINUE;
6397}
6398
6399/**
6400 * ipr_unit_check_no_data - Log a unit check/no data error log
6401 * @ioa_cfg: ioa config struct
6402 *
6403 * Logs an error indicating the adapter unit checked, but for some
6404 * reason, we were unable to fetch the unit check buffer.
6405 *
6406 * Return value:
6407 * nothing
6408 **/
6409static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
6410{
6411 ioa_cfg->errors_logged++;
6412 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
6413}
6414
6415/**
6416 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
6417 * @ioa_cfg: ioa config struct
6418 *
6419 * Fetches the unit check buffer from the adapter by clocking the data
6420 * through the mailbox register.
6421 *
6422 * Return value:
6423 * nothing
6424 **/
6425static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
6426{
6427 unsigned long mailbox;
6428 struct ipr_hostrcb *hostrcb;
6429 struct ipr_uc_sdt sdt;
6430 int rc, length;
65f56475 6431 u32 ioasc;
6432
6433 mailbox = readl(ioa_cfg->ioa_mailbox);
6434
6435 if (!ipr_sdt_is_fmt2(mailbox)) {
6436 ipr_unit_check_no_data(ioa_cfg);
6437 return;
6438 }
6439
6440 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
6441 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
6442 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
6443
6444 if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
6445 !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
6446 ipr_unit_check_no_data(ioa_cfg);
6447 return;
6448 }
6449
6450 /* Find length of the first sdt entry (UC buffer) */
6451 length = (be32_to_cpu(sdt.entry[0].end_offset) -
6452 be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
6453
6454 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
6455 struct ipr_hostrcb, queue);
6456 list_del(&hostrcb->queue);
6457 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
6458
6459 rc = ipr_get_ldump_data_section(ioa_cfg,
6460 be32_to_cpu(sdt.entry[0].bar_str_offset),
6461 (__be32 *)&hostrcb->hcam,
6462 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
6463
65f56475 6464 if (!rc) {
1da177e4 6465 ipr_handle_log_data(ioa_cfg, hostrcb);
6466 ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
6467 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
6468 ioa_cfg->sdt_state == GET_DUMP)
6469 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
6470 } else
6471 ipr_unit_check_no_data(ioa_cfg);
6472
6473 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
6474}
6475
6476/**
6477 * ipr_reset_restore_cfg_space - Restore PCI config space.
6478 * @ipr_cmd: ipr command struct
6479 *
6480 * Description: This function restores the saved PCI config space of
6481 * the adapter, fails all outstanding ops back to the callers, and
6482 * fetches the dump/unit check if applicable to this reset.
6483 *
6484 * Return value:
6485 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6486 **/
6487static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
6488{
6489 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6490 int rc;
6491
6492 ENTER;
6493 rc = pci_restore_state(ioa_cfg->pdev);
6494
6495 if (rc != PCIBIOS_SUCCESSFUL) {
6496 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6497 return IPR_RC_JOB_CONTINUE;
6498 }
6499
6500 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
6501 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6502 return IPR_RC_JOB_CONTINUE;
6503 }
6504
6505 ipr_fail_all_ops(ioa_cfg);
6506
6507 if (ioa_cfg->ioa_unit_checked) {
6508 ioa_cfg->ioa_unit_checked = 0;
6509 ipr_get_unit_check_buffer(ioa_cfg);
6510 ipr_cmd->job_step = ipr_reset_alert;
6511 ipr_reset_start_timer(ipr_cmd, 0);
6512 return IPR_RC_JOB_RETURN;
6513 }
6514
6515 if (ioa_cfg->in_ioa_bringdown) {
6516 ipr_cmd->job_step = ipr_ioa_bringdown_done;
6517 } else {
6518 ipr_cmd->job_step = ipr_reset_enable_ioa;
6519
6520 if (GET_DUMP == ioa_cfg->sdt_state) {
6521 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
6522 ipr_cmd->job_step = ipr_reset_wait_for_dump;
6523 schedule_work(&ioa_cfg->work_q);
6524 return IPR_RC_JOB_RETURN;
6525 }
6526 }
6527
6528	LEAVE;
6529 return IPR_RC_JOB_CONTINUE;
6530}
6531
6532/**
6533 * ipr_reset_bist_done - BIST has completed on the adapter.
6534 * @ipr_cmd: ipr command struct
6535 *
6536 * Description: Unblock config space and resume the reset process.
6537 *
6538 * Return value:
6539 * IPR_RC_JOB_CONTINUE
6540 **/
6541static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
6542{
6543 ENTER;
6544 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
6545 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
6546 LEAVE;
6547 return IPR_RC_JOB_CONTINUE;
6548}
6549
6550/**
6551 * ipr_reset_start_bist - Run BIST on the adapter.
6552 * @ipr_cmd: ipr command struct
6553 *
6554 * Description: This function runs BIST on the adapter, then delays 2 seconds.
6555 *
6556 * Return value:
6557 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6558 **/
6559static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
6560{
6561 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6562 int rc;
6563
6564 ENTER;
b30197d2 6565 pci_block_user_cfg_access(ioa_cfg->pdev);
6566 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
6567
6568 if (rc != PCIBIOS_SUCCESSFUL) {
a9aedb09 6569 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
6570 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6571 rc = IPR_RC_JOB_CONTINUE;
6572 } else {
e619e1a7 6573 ipr_cmd->job_step = ipr_reset_bist_done;
6574 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
6575 rc = IPR_RC_JOB_RETURN;
6576 }
6577
6578 LEAVE;
6579 return rc;
6580}
6581
6582/**
6583 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
6584 * @ipr_cmd: ipr command struct
6585 *
6586 * Description: This clears PCI reset to the adapter and delays two seconds.
6587 *
6588 * Return value:
6589 * IPR_RC_JOB_RETURN
6590 **/
6591static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
6592{
6593 ENTER;
6594 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
6595 ipr_cmd->job_step = ipr_reset_bist_done;
6596 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
6597 LEAVE;
6598 return IPR_RC_JOB_RETURN;
6599}
6600
6601/**
6602 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
6603 * @ipr_cmd: ipr command struct
6604 *
6605 * Description: This asserts PCI reset to the adapter.
6606 *
6607 * Return value:
6608 * IPR_RC_JOB_RETURN
6609 **/
6610static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
6611{
6612 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6613 struct pci_dev *pdev = ioa_cfg->pdev;
6614
6615 ENTER;
6616 pci_block_user_cfg_access(pdev);
6617 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
6618 ipr_cmd->job_step = ipr_reset_slot_reset_done;
6619 ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
6620 LEAVE;
6621 return IPR_RC_JOB_RETURN;
6622}
6623
6624/**
6625 * ipr_reset_allowed - Query whether or not IOA can be reset
6626 * @ioa_cfg: ioa config struct
6627 *
6628 * Return value:
6629 * 0 if reset not allowed / non-zero if reset is allowed
6630 **/
6631static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
6632{
6633 volatile u32 temp_reg;
6634
6635 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
6636 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
6637}
6638
6639/**
6640 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
6641 * @ipr_cmd: ipr command struct
6642 *
6643 * Description: This function waits for adapter permission to run BIST,
6644 * then runs BIST. If the adapter does not give permission after a
6645 * reasonable time, we will reset the adapter anyway. The impact of
6646 * resetting the adapter without warning the adapter is the risk of
6647 * losing the persistent error log on the adapter. If the adapter is
6648 * reset while it is writing to the flash on the adapter, the flash
6649 * segment will have bad ECC and be zeroed.
6650 *
6651 * Return value:
6652 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6653 **/
6654static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
6655{
6656 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6657 int rc = IPR_RC_JOB_RETURN;
6658
6659 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
6660 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
6661 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
6662 } else {
463fc696 6663 ipr_cmd->job_step = ioa_cfg->reset;
6664 rc = IPR_RC_JOB_CONTINUE;
6665 }
6666
6667 return rc;
6668}
6669
6670/**
6671 * ipr_reset_alert_part2 - Alert the adapter of a pending reset
6672 * @ipr_cmd: ipr command struct
6673 *
6674 * Description: This function alerts the adapter that it will be reset.
6675 * If memory space is not currently enabled, proceed directly
6676 * to running BIST on the adapter. The timer must always be started
6677 * so we guarantee we do not run BIST from ipr_isr.
6678 *
6679 * Return value:
6680 * IPR_RC_JOB_RETURN
6681 **/
6682static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
6683{
6684 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6685 u16 cmd_reg;
6686 int rc;
6687
6688 ENTER;
6689 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
6690
6691 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
6692 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
6693 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
6694 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
6695 } else {
463fc696 6696 ipr_cmd->job_step = ioa_cfg->reset;
6697 }
6698
6699 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
6700 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
6701
6702 LEAVE;
6703 return IPR_RC_JOB_RETURN;
6704}
6705
6706/**
6707 * ipr_reset_ucode_download_done - Microcode download completion
6708 * @ipr_cmd: ipr command struct
6709 *
6710 * Description: This function unmaps the microcode download buffer.
6711 *
6712 * Return value:
6713 * IPR_RC_JOB_CONTINUE
6714 **/
6715static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
6716{
6717 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6718 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
6719
6720 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
6721 sglist->num_sg, DMA_TO_DEVICE);
6722
6723 ipr_cmd->job_step = ipr_reset_alert;
6724 return IPR_RC_JOB_CONTINUE;
6725}
6726
6727/**
6728 * ipr_reset_ucode_download - Download microcode to the adapter
6729 * @ipr_cmd: ipr command struct
6730 *
6731 * Description: This function checks to see if there is microcode
6732 * to download to the adapter. If there is, a download is performed.
6733 *
6734 * Return value:
6735 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6736 **/
6737static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
6738{
6739 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6740 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
6741
6742 ENTER;
6743 ipr_cmd->job_step = ipr_reset_alert;
6744
6745 if (!sglist)
6746 return IPR_RC_JOB_CONTINUE;
6747
6748 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6749 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6750 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
6751 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
6752 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
6753 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
6754 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
6755
12baa420 6756 ipr_build_ucode_ioadl(ipr_cmd, sglist);
6757 ipr_cmd->job_step = ipr_reset_ucode_download_done;
6758
6759 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6760 IPR_WRITE_BUFFER_TIMEOUT);
6761
6762 LEAVE;
6763 return IPR_RC_JOB_RETURN;
6764}
6765
6766/**
6767 * ipr_reset_shutdown_ioa - Shutdown the adapter
6768 * @ipr_cmd: ipr command struct
6769 *
6770 * Description: This function issues an adapter shutdown of the
6771 * specified type to the specified adapter as part of the
6772 * adapter reset job.
6773 *
6774 * Return value:
6775 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6776 **/
6777static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
6778{
6779 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6780 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
6781 unsigned long timeout;
6782 int rc = IPR_RC_JOB_CONTINUE;
6783
6784 ENTER;
6785 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
6786 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6787 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6788 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
6789 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
6790
6791 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
6792 timeout = IPR_SHUTDOWN_TIMEOUT;
6793 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
6794 timeout = IPR_INTERNAL_TIMEOUT;
6795 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6796 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
1da177e4 6797 else
ac09c349 6798 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
6799
6800 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
6801
6802 rc = IPR_RC_JOB_RETURN;
6803 ipr_cmd->job_step = ipr_reset_ucode_download;
6804 } else
6805 ipr_cmd->job_step = ipr_reset_alert;
6806
6807 LEAVE;
6808 return rc;
6809}
6810
6811/**
6812 * ipr_reset_ioa_job - Adapter reset job
6813 * @ipr_cmd: ipr command struct
6814 *
6815 * Description: This function is the job router for the adapter reset job.
6816 *
6817 * Return value:
6818 * none
6819 **/
6820static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
6821{
6822 u32 rc, ioasc;
6823 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6824
6825 do {
6826 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6827
6828 if (ioa_cfg->reset_cmd != ipr_cmd) {
6829 /*
6830 * We are doing nested adapter resets and this is
6831 * not the current reset job.
6832 */
6833 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6834 return;
6835 }
6836
6837 if (IPR_IOASC_SENSE_KEY(ioasc)) {
dfed823e 6838 rc = ipr_cmd->job_step_failed(ipr_cmd);
6839 if (rc == IPR_RC_JOB_RETURN)
6840 return;
6841 }
6842
6843 ipr_reinit_ipr_cmnd(ipr_cmd);
dfed823e 6844 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
6845 rc = ipr_cmd->job_step(ipr_cmd);
6846 } while(rc == IPR_RC_JOB_CONTINUE);
6847}
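
/*
 * Illustrative sketch (not driver code): the reset job above is a chain of
 * steps where IPR_RC_JOB_CONTINUE means "run the next step immediately"
 * and IPR_RC_JOB_RETURN means "an asynchronous event will re-enter the
 * router later". A skeletal model of that pattern:
 */
#if 0
enum ex_rc { EX_JOB_CONTINUE, EX_JOB_RETURN };

struct ex_job {
	enum ex_rc (*step)(struct ex_job *);	/* set by each step */
};

static void ex_run_job(struct ex_job *job)
{
	/*
	 * Each step installs its successor in job->step before returning.
	 * Loop over synchronous steps; stop when one goes asynchronous,
	 * whose completion callback calls ex_run_job() again.
	 */
	while (job->step(job) == EX_JOB_CONTINUE)
		;
}
#endif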
6848
6849/**
6850 * _ipr_initiate_ioa_reset - Initiate an adapter reset
6851 * @ioa_cfg: ioa config struct
6852 * @job_step: first job step of reset job
6853 * @shutdown_type: shutdown type
6854 *
6855 * Description: This function will initiate the reset of the given adapter
6856 * starting at the selected job step.
6857 * If the caller needs to wait on the completion of the reset,
6858 * the caller must sleep on the reset_wait_q.
6859 *
6860 * Return value:
6861 * none
6862 **/
6863static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
6864 int (*job_step) (struct ipr_cmnd *),
6865 enum ipr_shutdown_type shutdown_type)
6866{
6867 struct ipr_cmnd *ipr_cmd;
6868
6869 ioa_cfg->in_reset_reload = 1;
6870 ioa_cfg->allow_cmds = 0;
6871 scsi_block_requests(ioa_cfg->host);
6872
6873 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
6874 ioa_cfg->reset_cmd = ipr_cmd;
6875 ipr_cmd->job_step = job_step;
6876 ipr_cmd->u.shutdown_type = shutdown_type;
6877
6878 ipr_reset_ioa_job(ipr_cmd);
6879}
6880
6881/**
6882 * ipr_initiate_ioa_reset - Initiate an adapter reset
6883 * @ioa_cfg: ioa config struct
6884 * @shutdown_type: shutdown type
6885 *
6886 * Description: This function will initiate the reset of the given adapter.
6887 * If the caller needs to wait on the completion of the reset,
6888 * the caller must sleep on the reset_wait_q.
6889 *
6890 * Return value:
6891 * none
6892 **/
6893static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
6894 enum ipr_shutdown_type shutdown_type)
6895{
6896 if (ioa_cfg->ioa_is_dead)
6897 return;
6898
6899 if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
6900 ioa_cfg->sdt_state = ABORT_DUMP;
6901
6902 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
6903 dev_err(&ioa_cfg->pdev->dev,
6904 "IOA taken offline - error recovery failed\n");
6905
6906 ioa_cfg->reset_retries = 0;
6907 ioa_cfg->ioa_is_dead = 1;
6908
6909 if (ioa_cfg->in_ioa_bringdown) {
6910 ioa_cfg->reset_cmd = NULL;
6911 ioa_cfg->in_reset_reload = 0;
6912 ipr_fail_all_ops(ioa_cfg);
6913 wake_up_all(&ioa_cfg->reset_wait_q);
6914
6915 spin_unlock_irq(ioa_cfg->host->host_lock);
6916 scsi_unblock_requests(ioa_cfg->host);
6917 spin_lock_irq(ioa_cfg->host->host_lock);
6918 return;
6919 } else {
6920 ioa_cfg->in_ioa_bringdown = 1;
6921 shutdown_type = IPR_SHUTDOWN_NONE;
6922 }
6923 }
6924
6925 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
6926 shutdown_type);
6927}
6928
6929/**
6930 * ipr_reset_freeze - Hold off all I/O activity
6931 * @ipr_cmd: ipr command struct
6932 *
6933 * Description: If the PCI slot is frozen, hold off all I/O
6934 * activity; then, as soon as the slot is available again,
6935 * initiate an adapter reset.
6936 */
6937static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
6938{
6939 /* Disallow new interrupts, avoid loop */
6940 ipr_cmd->ioa_cfg->allow_interrupts = 0;
6941 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
6942 ipr_cmd->done = ipr_reset_ioa_job;
6943 return IPR_RC_JOB_RETURN;
6944}
6945
6946/**
6947 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
6948 * @pdev: PCI device struct
6949 *
6950 * Description: This routine is called to tell us that the PCI bus
6951 * is down. Can't do anything here, except put the device driver
6952 * into a holding pattern, waiting for the PCI bus to come back.
6953 */
6954static void ipr_pci_frozen(struct pci_dev *pdev)
6955{
6956 unsigned long flags = 0;
6957 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6958
6959 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6960 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
6961 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6962}
6963
6964/**
6965 * ipr_pci_slot_reset - Called when PCI slot has been reset.
6966 * @pdev: PCI device struct
6967 *
6968 * Description: This routine is called by the pci error recovery
6969 * code after the PCI slot has been reset, just before we
6970 * should resume normal operations.
6971 */
6972static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
6973{
6974 unsigned long flags = 0;
6975 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6976
6977 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6978 if (ioa_cfg->needs_warm_reset)
6979 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6980 else
6981 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
6982 IPR_SHUTDOWN_NONE);
f8a88b19
LV
6983 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6984 return PCI_ERS_RESULT_RECOVERED;
6985}
6986
6987/**
6988 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
6989 * @pdev: PCI device struct
6990 *
6991 * Description: This routine is called when the PCI bus has
6992 * permanently failed.
6993 */
6994static void ipr_pci_perm_failure(struct pci_dev *pdev)
6995{
6996 unsigned long flags = 0;
6997 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6998
6999 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7000 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
7001 ioa_cfg->sdt_state = ABORT_DUMP;
7002 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
7003 ioa_cfg->in_ioa_bringdown = 1;
7004 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7005 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7006}
7007
7008/**
7009 * ipr_pci_error_detected - Called when a PCI error is detected.
7010 * @pdev: PCI device struct
7011 * @state: PCI channel state
7012 *
7013 * Description: Called when a PCI error is detected.
7014 *
7015 * Return value:
7016 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
7017 */
7018static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
7019 pci_channel_state_t state)
7020{
7021 switch (state) {
7022 case pci_channel_io_frozen:
7023 ipr_pci_frozen(pdev);
7024 return PCI_ERS_RESULT_NEED_RESET;
7025 case pci_channel_io_perm_failure:
7026 ipr_pci_perm_failure(pdev);
7027 return PCI_ERS_RESULT_DISCONNECT;
7028 break;
7029 default:
7030 break;
7031 }
7032 return PCI_ERS_RESULT_NEED_RESET;
7033}
7034
1da177e4
LT
7035/**
7036 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
7037 * @ioa_cfg: ioa cfg struct
7038 *
 7039 * Description: This is the second phase of adapter initialization.
 7040 * This function takes care of initializing the adapter to the point
 7041 * where it can accept new commands.
 7042 *
7043 * Return value:
b1c11812 7044 * 0 on success / -EIO on failure
1da177e4
LT
7045 **/
7046static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
7047{
7048 int rc = 0;
7049 unsigned long host_lock_flags = 0;
7050
7051 ENTER;
7052 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7053 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
ce155cce 7054 if (ioa_cfg->needs_hard_reset) {
7055 ioa_cfg->needs_hard_reset = 0;
7056 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7057 } else
7058 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
7059 IPR_SHUTDOWN_NONE);
1da177e4
LT
7060
7061 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7062 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7063 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7064
7065 if (ioa_cfg->ioa_is_dead) {
7066 rc = -EIO;
7067 } else if (ipr_invalid_adapter(ioa_cfg)) {
7068 if (!ipr_testmode)
7069 rc = -EIO;
7070
7071 dev_err(&ioa_cfg->pdev->dev,
7072 "Adapter not supported in this hardware configuration.\n");
7073 }
7074
7075 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7076
7077 LEAVE;
7078 return rc;
7079}
7080
7081/**
7082 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
7083 * @ioa_cfg: ioa config struct
7084 *
7085 * Return value:
7086 * none
7087 **/
7088static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7089{
7090 int i;
7091
7092 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
7093 if (ioa_cfg->ipr_cmnd_list[i])
7094 pci_pool_free(ioa_cfg->ipr_cmd_pool,
7095 ioa_cfg->ipr_cmnd_list[i],
7096 ioa_cfg->ipr_cmnd_list_dma[i]);
7097
7098 ioa_cfg->ipr_cmnd_list[i] = NULL;
7099 }
7100
7101 if (ioa_cfg->ipr_cmd_pool)
7102 pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
7103
7104 ioa_cfg->ipr_cmd_pool = NULL;
7105}
7106
7107/**
7108 * ipr_free_mem - Frees memory allocated for an adapter
7109 * @ioa_cfg: ioa cfg struct
7110 *
7111 * Return value:
7112 * nothing
7113 **/
7114static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
7115{
7116 int i;
7117
7118 kfree(ioa_cfg->res_entries);
7119 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
7120 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
7121 ipr_free_cmd_blks(ioa_cfg);
7122 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7123 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
7124 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
7125 ioa_cfg->cfg_table,
7126 ioa_cfg->cfg_table_dma);
7127
7128 for (i = 0; i < IPR_NUM_HCAMS; i++) {
7129 pci_free_consistent(ioa_cfg->pdev,
7130 sizeof(struct ipr_hostrcb),
7131 ioa_cfg->hostrcb[i],
7132 ioa_cfg->hostrcb_dma[i]);
7133 }
7134
7135 ipr_free_dump(ioa_cfg);
1da177e4
LT
7136 kfree(ioa_cfg->trace);
7137}
7138
7139/**
7140 * ipr_free_all_resources - Free all allocated resources for an adapter.
 7141 * @ioa_cfg: ioa config struct
7142 *
7143 * This function frees all allocated resources for the
7144 * specified adapter.
7145 *
7146 * Return value:
7147 * none
7148 **/
7149static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
7150{
7151 struct pci_dev *pdev = ioa_cfg->pdev;
7152
7153 ENTER;
7154 free_irq(pdev->irq, ioa_cfg);
7155 iounmap(ioa_cfg->hdw_dma_regs);
7156 pci_release_regions(pdev);
7157 ipr_free_mem(ioa_cfg);
7158 scsi_host_put(ioa_cfg->host);
7159 pci_disable_device(pdev);
7160 LEAVE;
7161}
7162
7163/**
7164 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
7165 * @ioa_cfg: ioa config struct
7166 *
7167 * Return value:
7168 * 0 on success / -ENOMEM on allocation failure
7169 **/
7170static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7171{
7172 struct ipr_cmnd *ipr_cmd;
7173 struct ipr_ioarcb *ioarcb;
7174 dma_addr_t dma_addr;
7175 int i;
7176
7177 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
7178 sizeof(struct ipr_cmnd), 8, 0);
7179
7180 if (!ioa_cfg->ipr_cmd_pool)
7181 return -ENOMEM;
7182
7183 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
e94b1766 7184 ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
1da177e4
LT
7185
7186 if (!ipr_cmd) {
7187 ipr_free_cmd_blks(ioa_cfg);
7188 return -ENOMEM;
7189 }
7190
7191 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
7192 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
7193 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
7194
7195 ioarcb = &ipr_cmd->ioarcb;
7196 ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
7197 ioarcb->host_response_handle = cpu_to_be32(i << 2);
7198 ioarcb->write_ioadl_addr =
7199 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
7200 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
7201 ioarcb->ioasa_host_pci_addr =
7202 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
7203 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
7204 ipr_cmd->cmd_index = i;
7205 ipr_cmd->ioa_cfg = ioa_cfg;
7206 ipr_cmd->sense_buffer_dma = dma_addr +
7207 offsetof(struct ipr_cmnd, sense_buffer);
7208
7209 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
7210 }
7211
7212 return 0;
7213}
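/*
 * Illustrative sketch (not part of ipr.c): the allocation above pairs with
 * ipr_free_cmd_blks() -- every pci_pool_alloc() must be matched by a
 * pci_pool_free() with the same virtual/DMA address pair before the pool
 * is destroyed.  A minimal standalone round trip through the same API
 * ("example" is just a pool name chosen for the sketch) looks like this:
 */
static int example_dma_pool_round_trip(struct pci_dev *pdev)
{
	struct pci_pool *pool;
	dma_addr_t dma_addr;
	void *vaddr;

	pool = pci_pool_create("example", pdev, sizeof(struct ipr_cmnd), 8, 0);
	if (!pool)
		return -ENOMEM;

	vaddr = pci_pool_alloc(pool, GFP_KERNEL, &dma_addr);
	if (!vaddr) {
		pci_pool_destroy(pool);
		return -ENOMEM;
	}

	/* hand dma_addr to the adapter, access the block through vaddr ... */

	pci_pool_free(pool, vaddr, dma_addr);
	pci_pool_destroy(pool);
	return 0;
}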
7214
7215/**
7216 * ipr_alloc_mem - Allocate memory for an adapter
7217 * @ioa_cfg: ioa config struct
7218 *
7219 * Return value:
7220 * 0 on success / non-zero for error
7221 **/
7222static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
7223{
7224 struct pci_dev *pdev = ioa_cfg->pdev;
7225 int i, rc = -ENOMEM;
7226
7227 ENTER;
0bc42e35 7228 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
1da177e4
LT
7229 IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
7230
7231 if (!ioa_cfg->res_entries)
7232 goto out;
7233
1da177e4
LT
7234 for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
7235 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
7236
7237 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
7238 sizeof(struct ipr_misc_cbs),
7239 &ioa_cfg->vpd_cbs_dma);
7240
7241 if (!ioa_cfg->vpd_cbs)
7242 goto out_free_res_entries;
7243
7244 if (ipr_alloc_cmd_blks(ioa_cfg))
7245 goto out_free_vpd_cbs;
7246
7247 ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
7248 sizeof(u32) * IPR_NUM_CMD_BLKS,
7249 &ioa_cfg->host_rrq_dma);
7250
7251 if (!ioa_cfg->host_rrq)
7252 goto out_ipr_free_cmd_blocks;
7253
7254 ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
7255 sizeof(struct ipr_config_table),
7256 &ioa_cfg->cfg_table_dma);
7257
7258 if (!ioa_cfg->cfg_table)
7259 goto out_free_host_rrq;
7260
7261 for (i = 0; i < IPR_NUM_HCAMS; i++) {
7262 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
7263 sizeof(struct ipr_hostrcb),
7264 &ioa_cfg->hostrcb_dma[i]);
7265
7266 if (!ioa_cfg->hostrcb[i])
7267 goto out_free_hostrcb_dma;
7268
7269 ioa_cfg->hostrcb[i]->hostrcb_dma =
7270 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
49dc6a18 7271 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
1da177e4
LT
7272 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
7273 }
7274
0bc42e35 7275 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
1da177e4
LT
7276 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
7277
7278 if (!ioa_cfg->trace)
7279 goto out_free_hostrcb_dma;
7280
1da177e4
LT
7281 rc = 0;
7282out:
7283 LEAVE;
7284 return rc;
7285
7286out_free_hostrcb_dma:
7287 while (i-- > 0) {
7288 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
7289 ioa_cfg->hostrcb[i],
7290 ioa_cfg->hostrcb_dma[i]);
7291 }
7292 pci_free_consistent(pdev, sizeof(struct ipr_config_table),
7293 ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
7294out_free_host_rrq:
7295 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7296 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
7297out_ipr_free_cmd_blocks:
7298 ipr_free_cmd_blks(ioa_cfg);
7299out_free_vpd_cbs:
7300 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
7301 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
7302out_free_res_entries:
7303 kfree(ioa_cfg->res_entries);
7304 goto out;
7305}
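/*
 * Illustrative sketch (not part of ipr.c): ipr_alloc_mem() above uses the
 * usual kernel "goto unwind" idiom -- each failing allocation jumps to a
 * label that releases everything allocated before it, in reverse order,
 * so there is a single exit path for both success and failure.
 */
static int example_goto_unwind(void)
{
	void *first, *second;
	int rc = -ENOMEM;

	first = kzalloc(64, GFP_KERNEL);
	if (!first)
		goto out;

	second = kzalloc(64, GFP_KERNEL);
	if (!second)
		goto out_free_first;

	/* ... use the buffers ... */
	kfree(second);
	kfree(first);
	rc = 0;
out:
	return rc;

out_free_first:
	kfree(first);
	goto out;
}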
7306
7307/**
7308 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
7309 * @ioa_cfg: ioa config struct
7310 *
7311 * Return value:
7312 * none
7313 **/
7314static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
7315{
7316 int i;
7317
7318 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7319 ioa_cfg->bus_attr[i].bus = i;
7320 ioa_cfg->bus_attr[i].qas_enabled = 0;
7321 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
7322 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
7323 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
7324 else
7325 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
7326 }
7327}
7328
7329/**
7330 * ipr_init_ioa_cfg - Initialize IOA config struct
7331 * @ioa_cfg: ioa config struct
7332 * @host: scsi host struct
7333 * @pdev: PCI dev struct
7334 *
7335 * Return value:
7336 * none
7337 **/
7338static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
7339 struct Scsi_Host *host, struct pci_dev *pdev)
7340{
7341 const struct ipr_interrupt_offsets *p;
7342 struct ipr_interrupts *t;
7343 void __iomem *base;
7344
7345 ioa_cfg->host = host;
7346 ioa_cfg->pdev = pdev;
7347 ioa_cfg->log_level = ipr_log_level;
3d1d0da6 7348 ioa_cfg->doorbell = IPR_DOORBELL;
1da177e4
LT
7349 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
7350 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
7351 sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
7352 sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
7353 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
7354 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
7355 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
7356 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
7357
7358 INIT_LIST_HEAD(&ioa_cfg->free_q);
7359 INIT_LIST_HEAD(&ioa_cfg->pending_q);
7360 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
7361 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
7362 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
7363 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
c4028958 7364 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
1da177e4
LT
7365 init_waitqueue_head(&ioa_cfg->reset_wait_q);
7366 ioa_cfg->sdt_state = INACTIVE;
62275040 7367 if (ipr_enable_cache)
7368 ioa_cfg->cache_state = CACHE_ENABLED;
7369 else
7370 ioa_cfg->cache_state = CACHE_DISABLED;
1da177e4
LT
7371
7372 ipr_initialize_bus_attr(ioa_cfg);
7373
7374 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
7375 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
7376 host->max_channel = IPR_MAX_BUS_TO_SCAN;
7377 host->unique_id = host->host_no;
7378 host->max_cmd_len = IPR_MAX_CDB_LEN;
7379 pci_set_drvdata(pdev, ioa_cfg);
7380
7381 p = &ioa_cfg->chip_cfg->regs;
7382 t = &ioa_cfg->regs;
7383 base = ioa_cfg->hdw_dma_regs;
7384
7385 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
7386 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
7387 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
7388 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
7389 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
7390 t->ioarrin_reg = base + p->ioarrin_reg;
7391 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
7392 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
7393 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
7394}
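/*
 * Illustrative sketch (not part of ipr.c): once ipr_init_ioa_cfg() has
 * turned the chip's register offsets into ioremap'ed addresses, the driver
 * talks to the adapter purely through readl()/writel() on those pointers.
 * Masking all adapter interrupts, for example, looks like this (the
 * read-back pushes the posted write out to the adapter):
 */
static void example_mask_adapter_interrupts(struct ipr_ioa_cfg *ioa_cfg)
{
	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	readl(ioa_cfg->regs.sense_interrupt_mask_reg);
}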
7395
7396/**
7397 * ipr_get_chip_cfg - Find adapter chip configuration
7398 * @dev_id: PCI device id struct
7399 *
7400 * Return value:
7401 * ptr to chip config on success / NULL on failure
7402 **/
7403static const struct ipr_chip_cfg_t * __devinit
7404ipr_get_chip_cfg(const struct pci_device_id *dev_id)
7405{
7406 int i;
7407
1da177e4
LT
7408 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
7409 if (ipr_chip[i].vendor == dev_id->vendor &&
7410 ipr_chip[i].device == dev_id->device)
7411 return ipr_chip[i].cfg;
7412 return NULL;
7413}
7414
7415/**
7416 * ipr_probe_ioa - Allocates memory and does first stage of initialization
7417 * @pdev: PCI device struct
7418 * @dev_id: PCI device id struct
7419 *
7420 * Return value:
7421 * 0 on success / non-zero on failure
7422 **/
7423static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7424 const struct pci_device_id *dev_id)
7425{
7426 struct ipr_ioa_cfg *ioa_cfg;
7427 struct Scsi_Host *host;
7428 unsigned long ipr_regs_pci;
7429 void __iomem *ipr_regs;
a2a65a3e 7430 int rc = PCIBIOS_SUCCESSFUL;
473b1e8e 7431 volatile u32 mask, uproc, interrupts;
1da177e4
LT
7432
7433 ENTER;
7434
7435 if ((rc = pci_enable_device(pdev))) {
7436 dev_err(&pdev->dev, "Cannot enable adapter\n");
7437 goto out;
7438 }
7439
7440 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
7441
7442 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
7443
7444 if (!host) {
7445 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
7446 rc = -ENOMEM;
7447 goto out_disable;
7448 }
7449
7450 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
7451 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
35a39691
BK
7452 ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
7453 sata_port_info.flags, &ipr_sata_ops);
1da177e4
LT
7454
7455 ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
7456
7457 if (!ioa_cfg->chip_cfg) {
7458 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
7459 dev_id->vendor, dev_id->device);
7460 goto out_scsi_host_put;
7461 }
7462
5469cb5b
BK
7463 if (ipr_transop_timeout)
7464 ioa_cfg->transop_timeout = ipr_transop_timeout;
7465 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
7466 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
7467 else
7468 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
7469
44c10138 7470 ioa_cfg->revid = pdev->revision;
463fc696 7471
1da177e4
LT
7472 ipr_regs_pci = pci_resource_start(pdev, 0);
7473
7474 rc = pci_request_regions(pdev, IPR_NAME);
7475 if (rc < 0) {
7476 dev_err(&pdev->dev,
7477 "Couldn't register memory range of registers\n");
7478 goto out_scsi_host_put;
7479 }
7480
7481 ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));
7482
7483 if (!ipr_regs) {
7484 dev_err(&pdev->dev,
7485 "Couldn't map memory range of registers\n");
7486 rc = -ENOMEM;
7487 goto out_release_regions;
7488 }
7489
7490 ioa_cfg->hdw_dma_regs = ipr_regs;
7491 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
7492 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
7493
7494 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
7495
7496 pci_set_master(pdev);
7497
7498 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
7499 if (rc < 0) {
7500 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
7501 goto cleanup_nomem;
7502 }
7503
7504 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
7505 ioa_cfg->chip_cfg->cache_line_size);
7506
7507 if (rc != PCIBIOS_SUCCESSFUL) {
7508 dev_err(&pdev->dev, "Write of cache line size failed\n");
7509 rc = -EIO;
7510 goto cleanup_nomem;
7511 }
7512
7513 /* Save away PCI config space for use following IOA reset */
7514 rc = pci_save_state(pdev);
7515
7516 if (rc != PCIBIOS_SUCCESSFUL) {
7517 dev_err(&pdev->dev, "Failed to save PCI config space\n");
7518 rc = -EIO;
7519 goto cleanup_nomem;
7520 }
7521
7522 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
7523 goto cleanup_nomem;
7524
7525 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
7526 goto cleanup_nomem;
7527
7528 rc = ipr_alloc_mem(ioa_cfg);
7529 if (rc < 0) {
7530 dev_err(&pdev->dev,
7531 "Couldn't allocate enough memory for device driver!\n");
7532 goto cleanup_nomem;
7533 }
7534
ce155cce 7535 /*
7536 * If HRRQ updated interrupt is not masked, or reset alert is set,
7537 * the card is in an unknown state and needs a hard reset
7538 */
7539 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
473b1e8e 7540 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
ce155cce 7541 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
7542 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
7543 ioa_cfg->needs_hard_reset = 1;
473b1e8e
BK
7544 if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
7545 ioa_cfg->needs_hard_reset = 1;
7546 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
7547 ioa_cfg->ioa_unit_checked = 1;
ce155cce 7548
1da177e4 7549 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
1d6f359a 7550 rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, ioa_cfg);
1da177e4
LT
7551
7552 if (rc) {
7553 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
7554 pdev->irq, rc);
7555 goto cleanup_nolog;
7556 }
7557
463fc696
BK
7558 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
7559 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
7560 ioa_cfg->needs_warm_reset = 1;
7561 ioa_cfg->reset = ipr_reset_slot_reset;
7562 } else
7563 ioa_cfg->reset = ipr_reset_start_bist;
7564
1da177e4
LT
7565 spin_lock(&ipr_driver_lock);
7566 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
7567 spin_unlock(&ipr_driver_lock);
7568
7569 LEAVE;
7570out:
7571 return rc;
7572
7573cleanup_nolog:
7574 ipr_free_mem(ioa_cfg);
7575cleanup_nomem:
7576 iounmap(ipr_regs);
7577out_release_regions:
7578 pci_release_regions(pdev);
7579out_scsi_host_put:
7580 scsi_host_put(host);
7581out_disable:
7582 pci_disable_device(pdev);
7583 goto out;
7584}
7585
7586/**
7587 * ipr_scan_vsets - Scans for VSET devices
7588 * @ioa_cfg: ioa config struct
7589 *
7590 * Description: Since the VSET resources do not follow SAM in that we can have
7591 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
7592 *
7593 * Return value:
7594 * none
7595 **/
7596static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
7597{
7598 int target, lun;
7599
7600 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
7601 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
7602 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
7603}
7604
7605/**
7606 * ipr_initiate_ioa_bringdown - Bring down an adapter
7607 * @ioa_cfg: ioa config struct
7608 * @shutdown_type: shutdown type
7609 *
7610 * Description: This function will initiate bringing down the adapter.
7611 * This consists of issuing an IOA shutdown to the adapter
7612 * to flush the cache, and running BIST.
7613 * If the caller needs to wait on the completion of the reset,
7614 * the caller must sleep on the reset_wait_q.
7615 *
7616 * Return value:
7617 * none
7618 **/
7619static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
7620 enum ipr_shutdown_type shutdown_type)
7621{
7622 ENTER;
7623 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
7624 ioa_cfg->sdt_state = ABORT_DUMP;
7625 ioa_cfg->reset_retries = 0;
7626 ioa_cfg->in_ioa_bringdown = 1;
7627 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
7628 LEAVE;
7629}
7630
7631/**
7632 * __ipr_remove - Remove a single adapter
7633 * @pdev: pci device struct
7634 *
7635 * Adapter hot plug remove entry point.
7636 *
7637 * Return value:
7638 * none
7639 **/
7640static void __ipr_remove(struct pci_dev *pdev)
7641{
7642 unsigned long host_lock_flags = 0;
7643 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7644 ENTER;
7645
7646 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
970ea294
BK
7647 while(ioa_cfg->in_reset_reload) {
7648 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7649 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7650 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7651 }
7652
1da177e4
LT
7653 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
7654
7655 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7656 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5cbf5eae 7657 flush_scheduled_work();
1da177e4
LT
7658 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7659
7660 spin_lock(&ipr_driver_lock);
7661 list_del(&ioa_cfg->queue);
7662 spin_unlock(&ipr_driver_lock);
7663
7664 if (ioa_cfg->sdt_state == ABORT_DUMP)
7665 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7666 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7667
7668 ipr_free_all_resources(ioa_cfg);
7669
7670 LEAVE;
7671}
7672
7673/**
7674 * ipr_remove - IOA hot plug remove entry point
7675 * @pdev: pci device struct
7676 *
7677 * Adapter hot plug remove entry point.
7678 *
7679 * Return value:
7680 * none
7681 **/
7682static void ipr_remove(struct pci_dev *pdev)
7683{
7684 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7685
7686 ENTER;
7687
ee959b00 7688 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4 7689 &ipr_trace_attr);
ee959b00 7690 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4
LT
7691 &ipr_dump_attr);
7692 scsi_remove_host(ioa_cfg->host);
7693
7694 __ipr_remove(pdev);
7695
7696 LEAVE;
7697}
7698
7699/**
7700 * ipr_probe - Adapter hot plug add entry point
7701 *
7702 * Return value:
7703 * 0 on success / non-zero on failure
7704 **/
7705static int __devinit ipr_probe(struct pci_dev *pdev,
7706 const struct pci_device_id *dev_id)
7707{
7708 struct ipr_ioa_cfg *ioa_cfg;
7709 int rc;
7710
7711 rc = ipr_probe_ioa(pdev, dev_id);
7712
7713 if (rc)
7714 return rc;
7715
7716 ioa_cfg = pci_get_drvdata(pdev);
7717 rc = ipr_probe_ioa_part2(ioa_cfg);
7718
7719 if (rc) {
7720 __ipr_remove(pdev);
7721 return rc;
7722 }
7723
7724 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
7725
7726 if (rc) {
7727 __ipr_remove(pdev);
7728 return rc;
7729 }
7730
ee959b00 7731 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4
LT
7732 &ipr_trace_attr);
7733
7734 if (rc) {
7735 scsi_remove_host(ioa_cfg->host);
7736 __ipr_remove(pdev);
7737 return rc;
7738 }
7739
ee959b00 7740 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4
LT
7741 &ipr_dump_attr);
7742
7743 if (rc) {
ee959b00 7744 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4
LT
7745 &ipr_trace_attr);
7746 scsi_remove_host(ioa_cfg->host);
7747 __ipr_remove(pdev);
7748 return rc;
7749 }
7750
7751 scsi_scan_host(ioa_cfg->host);
7752 ipr_scan_vsets(ioa_cfg);
7753 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
7754 ioa_cfg->allow_ml_add_del = 1;
11cd8f12 7755 ioa_cfg->host->max_channel = IPR_VSET_BUS;
1da177e4
LT
7756 schedule_work(&ioa_cfg->work_q);
7757 return 0;
7758}
7759
7760/**
7761 * ipr_shutdown - Shutdown handler.
d18c3db5 7762 * @pdev: pci device struct
1da177e4
LT
7763 *
 7764 * This function is invoked upon system shutdown/reboot. It issues
 7765 * a shutdown command to the adapter to flush the write cache.
7766 *
7767 * Return value:
7768 * none
7769 **/
d18c3db5 7770static void ipr_shutdown(struct pci_dev *pdev)
1da177e4 7771{
d18c3db5 7772 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
1da177e4
LT
7773 unsigned long lock_flags = 0;
7774
7775 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
970ea294
BK
7776 while(ioa_cfg->in_reset_reload) {
7777 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7778 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7779 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7780 }
7781
1da177e4
LT
7782 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
7783 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7784 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7785}
7786
7787static struct pci_device_id ipr_pci_table[] __devinitdata = {
7788 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6d84c944 7789 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
1da177e4 7790 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6d84c944 7791 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
1da177e4 7792 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6d84c944 7793 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
1da177e4 7794 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6d84c944 7795 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
1da177e4 7796 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6d84c944 7797 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
1da177e4 7798 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6d84c944 7799 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
1da177e4 7800 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6d84c944 7801 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
86f51436 7802 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
5469cb5b
BK
7803 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
7804 IPR_USE_LONG_TRANSOP_TIMEOUT },
86f51436 7805 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
6d84c944 7806 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
86f51436 7807 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
22d2e402
BK
7808 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
7809 IPR_USE_LONG_TRANSOP_TIMEOUT },
60e7486b 7810 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
5469cb5b
BK
7811 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
7812 IPR_USE_LONG_TRANSOP_TIMEOUT },
86f51436 7813 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
6d84c944 7814 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
86f51436 7815 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
22d2e402
BK
7816 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
7817 IPR_USE_LONG_TRANSOP_TIMEOUT},
60e7486b 7818 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
5469cb5b
BK
7819 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
7820 IPR_USE_LONG_TRANSOP_TIMEOUT },
185eb31c 7821 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
22d2e402
BK
7822 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
7823 IPR_USE_LONG_TRANSOP_TIMEOUT },
185eb31c
BK
7824 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7825 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0,
7826 IPR_USE_LONG_TRANSOP_TIMEOUT },
7827 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7828 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
60e7486b 7829 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
5469cb5b 7830 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
463fc696 7831 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
1da177e4 7832 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
6d84c944 7833 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
1da177e4 7834 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
6d84c944 7835 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
86f51436 7836 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
5469cb5b
BK
7837 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
7838 IPR_USE_LONG_TRANSOP_TIMEOUT },
60e7486b 7839 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
5469cb5b
BK
7840 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
7841 IPR_USE_LONG_TRANSOP_TIMEOUT },
185eb31c
BK
7842 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SCAMP_E,
7843 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0,
7844 IPR_USE_LONG_TRANSOP_TIMEOUT },
1da177e4
LT
7845 { }
7846};
7847MODULE_DEVICE_TABLE(pci, ipr_pci_table);
7848
f8a88b19
LV
7849static struct pci_error_handlers ipr_err_handler = {
7850 .error_detected = ipr_pci_error_detected,
7851 .slot_reset = ipr_pci_slot_reset,
7852};
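/*
 * Illustrative sketch (not part of ipr.c): struct pci_error_handlers also
 * has a .resume callback, invoked once recovery has finished.  A driver
 * that wanted an explicit hook at that point could supply something like
 * the following (ipr instead restarts I/O from its reset job):
 */
static void example_pci_resume(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	scsi_unblock_requests(ioa_cfg->host);
}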
7853
1da177e4
LT
7854static struct pci_driver ipr_driver = {
7855 .name = IPR_NAME,
7856 .id_table = ipr_pci_table,
7857 .probe = ipr_probe,
7858 .remove = ipr_remove,
d18c3db5 7859 .shutdown = ipr_shutdown,
f8a88b19 7860 .err_handler = &ipr_err_handler,
68c96e59 7861 .dynids.use_driver_data = 1
1da177e4
LT
7862};
7863
7864/**
7865 * ipr_init - Module entry point
7866 *
7867 * Return value:
7868 * 0 on success / negative value on failure
7869 **/
7870static int __init ipr_init(void)
7871{
7872 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
7873 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
7874
dcbccbde 7875 return pci_register_driver(&ipr_driver);
1da177e4
LT
7876}
7877
7878/**
7879 * ipr_exit - Module unload
7880 *
7881 * Module unload entry point.
7882 *
7883 * Return value:
7884 * none
7885 **/
7886static void __exit ipr_exit(void)
7887{
7888 pci_unregister_driver(&ipr_driver);
7889}
7890
7891module_init(ipr_init);
7892module_exit(ipr_exit);