[SCSI] ipr: Remove unnecessary memory barriers
deliverable/linux.git: drivers/scsi/ipr.c

/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 * Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};
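
/*
 * Each ipr_chip_cfg_t entry above pairs a chip family's mailbox offset,
 * cache line size, and clear_isr behavior with its bank of interrupt and
 * ioarrin register offsets.  The CRoC (SIS-64) entry additionally carries
 * 32-bit aliases of the 64-bit interrupt registers plus init feedback,
 * dump, and endian swap registers that the older SIS-32 chips lack.
 */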

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};
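
/*
 * ipr_chip[] maps each supported PCI vendor/device ID to its interrupt
 * mode (LSI vs. MSI), SIS generation (32- vs. 64-bit), config access
 * method (PCI config space vs. MMIO), and the matching ipr_chip_cfg
 * entry above.
 */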

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"No ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};
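
/*
 * Each ipr_ses_table entry carries a product ID pattern for a SCSI
 * enclosure and the maximum bus speed (in MB/s) the driver will allow
 * on the bus behind it; the pattern bytes appear to select which
 * product ID characters participate in the comparison.
 */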

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif
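
/*
 * ipr_trc_hook() increments trace_index with no bounds check, so the
 * field is evidently sized to wrap, making ioa_cfg->trace a fixed-size
 * circular buffer; each entry records the opcode, command index,
 * resource handle, timestamp, and caller-supplied data, giving a cheap
 * always-on log of recent adapter activity.
 */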

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}
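
/*
 * Note that ipr_get_free_ipr_cmnd() takes free_q.next without checking
 * for an empty list, so callers must guarantee a free command block is
 * available before calling it.
 */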

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}
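
/*
 * The trailing readl() of sense_interrupt_reg forces the posted MMIO
 * writes above out to the adapter before this function returns; that
 * read-back is what makes additional explicit memory barriers
 * unnecessary here.
 */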

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->s.ioasa.hdr.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;
		else if (ipr_cmd->qc)
			ipr_cmd->done = ipr_sata_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}
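
/*
 * ipr_fail_all_ops() completes every command still on pending_q back to
 * its originator with an IOASC of IPR_IOASC_IOA_WAS_RESET, routing each
 * through the SCSI or SATA error-handling done function as appropriate,
 * which is how outstanding I/O is flushed when the adapter is reset.
 */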

/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}
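
/*
 * On SIS-64 adapters the low-order bits of the IOARCB address written
 * to ioarrin_reg encode the IOARCB size: 0x1 selects the default
 * 256-byte format, and 0x1 | 0x4 selects the 512-byte format used when
 * the scatter list will not fit in 128 bytes.  SIS-32 adapters take the
 * plain 32-bit address.
 */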

/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}
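
/*
 * ipr_init_ioadl() covers the common single-descriptor case.  SIS-64
 * uses one combined descriptor list, while SIS-32 keeps separate read
 * and write lists, which is why the flags value selects between the
 * read_* fields and the plain ioadl_len/data_transfer_length fields in
 * the SIS-32 branch.
 */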

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}
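
/*
 * Note that ipr_send_blocking_cmd() is entered with the host lock held:
 * it drops host_lock around wait_for_completion() and re-acquires it
 * before returning, so it can only be used from contexts that may
 * sleep.
 */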

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 *
 * Return value:
 * 	none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path));

		res->bus = 0;
		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
			    sizeof(cfgtew->u.cfgte64->dev_id)) &&
		    !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			    sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}

/**
 * ipr_format_res_path - Format the resource path for printing.
 * @res_path:	resource path
 * @buffer:	buffer to hold the formatted string
 * @len:	length of buffer
 *
 * Return value:
 * 	pointer to buffer
 **/
static char *ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
	int i;
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

	return buffer;
}
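
/*
 * The formatted resource path is a dash-separated list of hex bytes
 * terminated by the first 0xff entry; e.g. a path of {0x00, 0x02, 0x04,
 * 0xff, ...} (example values only) would render as "00-02-04".
 */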

/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
				 struct ipr_config_table_entry_wrapper *cfgtew)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	unsigned int proto;
	int new_path = 0;

	if (res->ioa_cfg->sis64) {
		res->flags = cfgtew->u.cfgte64->flags;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL64(res);
		proto = cfgtew->u.cfgte64->proto;
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));

		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
				sizeof(res->res_path));
			new_path = 1;
		}

		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->res_path, buffer,
							sizeof(buffer)));
	} else {
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL(res);
		proto = cfgtew->u.cfgte->proto;
		res->res_handle = cfgtew->u.cfgte->res_handle;
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 * 			  for the resource.
 * @res:	resource entry struct
 *
 * Return value:
 * 	none
 **/
static void ipr_clear_res_target(struct ipr_resource_entry *res)
{
	struct ipr_resource_entry *gscsi_res = NULL;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;

	if (!ioa_cfg->sis64)
		return;

	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->array_ids);
	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->vset_ids);
	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
				return;
		clear_bit(res->target, ioa_cfg->target_ids);

	} else if (res->bus == 0)
		clear_bit(res->target, ioa_cfg->target_ids);
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry_wrapper cfgtew;
	__be32 cc_res_handle;

	u32 is_ndn = 1;

	if (ioa_cfg->sis64) {
		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
		cc_res_handle = cfgtew.u.cfgte64->res_handle;
	} else {
		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
		cc_res_handle = cfgtew.u.cfgte->res_handle;
	}

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->res_handle == cc_res_handle) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res, &cfgtew);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	ipr_update_res_entry(res, &cfgtew);

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else {
			ipr_clear_res_target(res);
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	} else if (!res->sdev || res->del_from_ml) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i:		index into buffer
 * @buf:	string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NUL terminate the string.
 *
 * Return value:
 * 	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = ' ';
	buf[i+2] = '\0';
	return i + 2;
}
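
/*
 * Example: given "IBM     " (example value only) with the starting
 * index at its final byte, strip_and_pad_whitespace() rewrites the
 * buffer to "IBM " and returns the offset of the new NUL terminator,
 * i.e. where ipr_log_vpd_compact() below copies the next field.
 */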

/**
 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
	int i = 0;

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';

	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
}

/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}

/**
 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				    struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_12_error;
	else
		error = &hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}
1551
4565e370
WB
1552/**
1553 * ipr_log_sis64_config_error - Log a sis64 configuration error.
1554 * @ioa_cfg: ioa config struct
1555 * @hostrcb: hostrcb struct
1556 *
1557 * Return value:
1558 * none
1559 **/
1560static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1561 struct ipr_hostrcb *hostrcb)
1562{
1563 int errors_logged, i;
1564 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1565 struct ipr_hostrcb_type_23_error *error;
1566 char buffer[IPR_MAX_RES_PATH_LENGTH];
1567
1568 error = &hostrcb->hcam.u.error64.u.type_23_error;
1569 errors_logged = be32_to_cpu(error->errors_logged);
1570
1571 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1572 be32_to_cpu(error->errors_detected), errors_logged);
1573
1574 dev_entry = error->dev;
1575
1576 for (i = 0; i < errors_logged; i++, dev_entry++) {
1577 ipr_err_separator;
1578
1579 ipr_err("Device %d : %s", i + 1,
5adcbeb3
WB
1580 ipr_format_res_path(dev_entry->res_path, buffer,
1581 sizeof(buffer)));
4565e370
WB
1582 ipr_log_ext_vpd(&dev_entry->vpd);
1583
1584 ipr_err("-----New Device Information-----\n");
1585 ipr_log_ext_vpd(&dev_entry->new_vpd);
1586
1587 ipr_err("Cache Directory Card Information:\n");
1588 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1589
1590 ipr_err("Adapter Card Information:\n");
1591 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1592 }
1593}
1594
1da177e4
LT
1595/**
1596 * ipr_log_config_error - Log a configuration error.
1597 * @ioa_cfg: ioa config struct
1598 * @hostrcb: hostrcb struct
1599 *
1600 * Return value:
1601 * none
1602 **/
1603static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1604 struct ipr_hostrcb *hostrcb)
1605{
1606 int errors_logged, i;
1607 struct ipr_hostrcb_device_data_entry *dev_entry;
1608 struct ipr_hostrcb_type_03_error *error;
1609
1610 error = &hostrcb->hcam.u.error.u.type_03_error;
1611 errors_logged = be32_to_cpu(error->errors_logged);
1612
1613 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1614 be32_to_cpu(error->errors_detected), errors_logged);
1615
cfc32139 1616 dev_entry = error->dev;
1da177e4
LT
1617
1618 for (i = 0; i < errors_logged; i++, dev_entry++) {
1619 ipr_err_separator;
1620
fa15b1f6 1621 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
cfc32139 1622 ipr_log_vpd(&dev_entry->vpd);
1da177e4
LT
1623
1624 ipr_err("-----New Device Information-----\n");
cfc32139 1625 ipr_log_vpd(&dev_entry->new_vpd);
1da177e4
LT
1626
1627 ipr_err("Cache Directory Card Information:\n");
cfc32139 1628 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1da177e4
LT
1629
1630 ipr_err("Adapter Card Information:\n");
cfc32139 1631 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1da177e4
LT
1632
1633 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1634 be32_to_cpu(dev_entry->ioa_data[0]),
1635 be32_to_cpu(dev_entry->ioa_data[1]),
1636 be32_to_cpu(dev_entry->ioa_data[2]),
1637 be32_to_cpu(dev_entry->ioa_data[3]),
1638 be32_to_cpu(dev_entry->ioa_data[4]));
1639 }
1640}
1641
ee0f05b8 1642/**
1643 * ipr_log_enhanced_array_error - Log an array configuration error.
1644 * @ioa_cfg: ioa config struct
1645 * @hostrcb: hostrcb struct
1646 *
1647 * Return value:
1648 * none
1649 **/
1650static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1651 struct ipr_hostrcb *hostrcb)
1652{
1653 int i, num_entries;
1654 struct ipr_hostrcb_type_14_error *error;
1655 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1656 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1657
1658 error = &hostrcb->hcam.u.error.u.type_14_error;
1659
1660 ipr_err_separator;
1661
1662 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1663 error->protection_level,
1664 ioa_cfg->host->host_no,
1665 error->last_func_vset_res_addr.bus,
1666 error->last_func_vset_res_addr.target,
1667 error->last_func_vset_res_addr.lun);
1668
1669 ipr_err_separator;
1670
1671 array_entry = error->array_member;
1672 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
7262026f 1673 ARRAY_SIZE(error->array_member));
ee0f05b8 1674
1675 for (i = 0; i < num_entries; i++, array_entry++) {
1676 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1677 continue;
1678
1679 if (be32_to_cpu(error->exposed_mode_adn) == i)
1680 ipr_err("Exposed Array Member %d:\n", i);
1681 else
1682 ipr_err("Array Member %d:\n", i);
1683
1684 ipr_log_ext_vpd(&array_entry->vpd);
1685 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1686 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1687 "Expected Location");
1688
1689 ipr_err_separator;
1690 }
1691}
1692
1da177e4
LT
1693/**
1694 * ipr_log_array_error - Log an array configuration error.
1695 * @ioa_cfg: ioa config struct
1696 * @hostrcb: hostrcb struct
1697 *
1698 * Return value:
1699 * none
1700 **/
1701static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1702 struct ipr_hostrcb *hostrcb)
1703{
1704 int i;
1705 struct ipr_hostrcb_type_04_error *error;
1706 struct ipr_hostrcb_array_data_entry *array_entry;
1707 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1708
1709 error = &hostrcb->hcam.u.error.u.type_04_error;
1710
1711 ipr_err_separator;
1712
1713 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1714 error->protection_level,
1715 ioa_cfg->host->host_no,
1716 error->last_func_vset_res_addr.bus,
1717 error->last_func_vset_res_addr.target,
1718 error->last_func_vset_res_addr.lun);
1719
1720 ipr_err_separator;
1721
1722 array_entry = error->array_member;
1723
1724 for (i = 0; i < 18; i++) {
cfc32139 1725 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1da177e4
LT
1726 continue;
1727
fa15b1f6 1728 if (be32_to_cpu(error->exposed_mode_adn) == i)
1da177e4 1729 ipr_err("Exposed Array Member %d:\n", i);
fa15b1f6 1730 else
1da177e4 1731 ipr_err("Array Member %d:\n", i);
1da177e4 1732
cfc32139 1733 ipr_log_vpd(&array_entry->vpd);
1da177e4 1734
fa15b1f6 1735 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1736 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1737 "Expected Location");
1da177e4
LT
1738
1739 ipr_err_separator;
1740
1741 if (i == 9)
1742 array_entry = error->array_member2;
1743 else
1744 array_entry++;
1745 }
1746}
1747
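/*
 * Illustration (editor's note, derived from the loop above): type 04
 * error records split their 18 array-member slots across two fixed
 * arrays, which is why the loop switches from error->array_member to
 * error->array_member2 after slot 9 rather than simply incrementing
 * the entry pointer past the end of the first array.
 */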
1748/**
b0df54bb 1749 * ipr_log_hex_data - Log additional hex IOA error data.
ac719aba 1750 * @ioa_cfg: ioa config struct
b0df54bb 1751 * @data: IOA error data
1752 * @len: data length
1da177e4
LT
1753 *
1754 * Return value:
1755 * none
1756 **/
ac719aba 1757static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1da177e4
LT
1758{
1759 int i;
1da177e4 1760
b0df54bb 1761 if (len == 0)
1da177e4
LT
1762 return;
1763
ac719aba
BK
1764 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1765 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1766
b0df54bb 1767 for (i = 0; i < len / 4; i += 4) {
1da177e4 1768 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
b0df54bb 1769 be32_to_cpu(data[i]),
1770 be32_to_cpu(data[i+1]),
1771 be32_to_cpu(data[i+2]),
1772 be32_to_cpu(data[i+3]));
1da177e4
LT
1773 }
1774}
1775
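/*
 * Worked example (data values illustrative): with len = 32 the loop
 * above runs for i = 0 and i = 4 (len / 4 = 8 words, four words per
 * row), so two rows are printed at byte offsets i * 4 = 0x00 and 0x10:
 *
 *	00000000: DEADBEEF 00000001 00000002 00000003
 *	00000010: 00000004 00000005 00000006 00000007
 */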
ee0f05b8 1776/**
1777 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1778 * @ioa_cfg: ioa config struct
1779 * @hostrcb: hostrcb struct
1780 *
1781 * Return value:
1782 * none
1783 **/
1784static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1785 struct ipr_hostrcb *hostrcb)
1786{
1787 struct ipr_hostrcb_type_17_error *error;
1788
4565e370
WB
1789 if (ioa_cfg->sis64)
1790 error = &hostrcb->hcam.u.error64.u.type_17_error;
1791 else
1792 error = &hostrcb->hcam.u.error.u.type_17_error;
1793
ee0f05b8 1794 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
ca54cb8c 1795 strim(error->failure_reason);
ee0f05b8 1796
8cf093e2
BK
1797 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1798 be32_to_cpu(hostrcb->hcam.u.error.prc));
1799 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
ac719aba 1800 ipr_log_hex_data(ioa_cfg, error->data,
ee0f05b8 1801 be32_to_cpu(hostrcb->hcam.length) -
1802 (offsetof(struct ipr_hostrcb_error, u) +
1803 offsetof(struct ipr_hostrcb_type_17_error, data)));
1804}
1805
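/*
 * Note on the length math above: the byte count handed to
 * ipr_log_hex_data() is the total HCAM length minus everything that
 * precedes the variable-length 'data' member -- offsetof(struct
 * ipr_hostrcb_error, u) plus offsetof(struct ipr_hostrcb_type_17_error,
 * data) -- so only the adapter-specific trailing bytes are hex dumped.
 */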
b0df54bb 1806/**
1807 * ipr_log_dual_ioa_error - Log a dual adapter error.
1808 * @ioa_cfg: ioa config struct
1809 * @hostrcb: hostrcb struct
1810 *
1811 * Return value:
1812 * none
1813 **/
1814static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1815 struct ipr_hostrcb *hostrcb)
1816{
1817 struct ipr_hostrcb_type_07_error *error;
1818
1819 error = &hostrcb->hcam.u.error.u.type_07_error;
1820 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
ca54cb8c 1821 strim(error->failure_reason);
b0df54bb 1822
8cf093e2
BK
1823 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1824 be32_to_cpu(hostrcb->hcam.u.error.prc));
1825 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
ac719aba 1826 ipr_log_hex_data(ioa_cfg, error->data,
b0df54bb 1827 be32_to_cpu(hostrcb->hcam.length) -
1828 (offsetof(struct ipr_hostrcb_error, u) +
1829 offsetof(struct ipr_hostrcb_type_07_error, data)));
1830}
1831
49dc6a18
BK
1832static const struct {
1833 u8 active;
1834 char *desc;
1835} path_active_desc[] = {
1836 { IPR_PATH_NO_INFO, "Path" },
1837 { IPR_PATH_ACTIVE, "Active path" },
1838 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1839};
1840
1841static const struct {
1842 u8 state;
1843 char *desc;
1844} path_state_desc[] = {
1845 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1846 { IPR_PATH_HEALTHY, "is healthy" },
1847 { IPR_PATH_DEGRADED, "is degraded" },
1848 { IPR_PATH_FAILED, "is failed" }
1849};
1850
1851/**
1852 * ipr_log_fabric_path - Log a fabric path error
1853 * @hostrcb: hostrcb struct
1854 * @fabric: fabric descriptor
1855 *
1856 * Return value:
1857 * none
1858 **/
1859static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1860 struct ipr_hostrcb_fabric_desc *fabric)
1861{
1862 int i, j;
1863 u8 path_state = fabric->path_state;
1864 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1865 u8 state = path_state & IPR_PATH_STATE_MASK;
1866
1867 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1868 if (path_active_desc[i].active != active)
1869 continue;
1870
1871 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1872 if (path_state_desc[j].state != state)
1873 continue;
1874
1875 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1876 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1877 path_active_desc[i].desc, path_state_desc[j].desc,
1878 fabric->ioa_port);
1879 } else if (fabric->cascaded_expander == 0xff) {
1880 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1881 path_active_desc[i].desc, path_state_desc[j].desc,
1882 fabric->ioa_port, fabric->phy);
1883 } else if (fabric->phy == 0xff) {
1884 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1885 path_active_desc[i].desc, path_state_desc[j].desc,
1886 fabric->ioa_port, fabric->cascaded_expander);
1887 } else {
1888 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1889 path_active_desc[i].desc, path_state_desc[j].desc,
1890 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1891 }
1892 return;
1893 }
1894 }
1895
1896 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1897 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1898}
1899
4565e370
WB
1900/**
1901 * ipr_log64_fabric_path - Log a fabric path error
1902 * @hostrcb: hostrcb struct
1903 * @fabric: fabric descriptor
1904 *
1905 * Return value:
1906 * none
1907 **/
1908static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
1909 struct ipr_hostrcb64_fabric_desc *fabric)
1910{
1911 int i, j;
1912 u8 path_state = fabric->path_state;
1913 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1914 u8 state = path_state & IPR_PATH_STATE_MASK;
1915 char buffer[IPR_MAX_RES_PATH_LENGTH];
1916
1917 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1918 if (path_active_desc[i].active != active)
1919 continue;
1920
1921 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1922 if (path_state_desc[j].state != state)
1923 continue;
1924
1925 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
1926 path_active_desc[i].desc, path_state_desc[j].desc,
5adcbeb3
WB
1927 ipr_format_res_path(fabric->res_path, buffer,
1928 sizeof(buffer)));
4565e370
WB
1929 return;
1930 }
1931 }
1932
1933 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
5adcbeb3 1934 ipr_format_res_path(fabric->res_path, buffer, sizeof(buffer)));
4565e370
WB
1935}
1936
49dc6a18
BK
1937static const struct {
1938 u8 type;
1939 char *desc;
1940} path_type_desc[] = {
1941 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
1942 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
1943 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
1944 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
1945};
1946
1947static const struct {
1948 u8 status;
1949 char *desc;
1950} path_status_desc[] = {
1951 { IPR_PATH_CFG_NO_PROB, "Functional" },
1952 { IPR_PATH_CFG_DEGRADED, "Degraded" },
1953 { IPR_PATH_CFG_FAILED, "Failed" },
1954 { IPR_PATH_CFG_SUSPECT, "Suspect" },
1955 { IPR_PATH_NOT_DETECTED, "Missing" },
1956 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
1957};
1958
1959static const char *link_rate[] = {
1960 "unknown",
1961 "disabled",
1962 "phy reset problem",
1963 "spinup hold",
1964 "port selector",
1965 "unknown",
1966 "unknown",
1967 "unknown",
1968 "1.5Gbps",
1969 "3.0Gbps",
1970 "unknown",
1971 "unknown",
1972 "unknown",
1973 "unknown",
1974 "unknown",
1975 "unknown"
1976};
1977
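/*
 * Illustration (editor's note): link_rate[] has exactly 16 entries, so
 * masking the adapter-reported rate with IPR_PHY_LINK_RATE_MASK
 * (assuming the usual 0x0f definition, i.e. the low four bits) always
 * yields a valid index; a reported value of 0x29, for example, would
 * index entry 9, "3.0Gbps". The 0x29 value is illustrative.
 */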
1978/**
1979 * ipr_log_path_elem - Log a fabric path element.
1980 * @hostrcb: hostrcb struct
1981 * @cfg: fabric path element struct
1982 *
1983 * Return value:
1984 * none
1985 **/
1986static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
1987 struct ipr_hostrcb_config_element *cfg)
1988{
1989 int i, j;
1990 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
1991 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
1992
1993 if (type == IPR_PATH_CFG_NOT_EXIST)
1994 return;
1995
1996 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
1997 if (path_type_desc[i].type != type)
1998 continue;
1999
2000 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2001 if (path_status_desc[j].status != status)
2002 continue;
2003
2004 if (type == IPR_PATH_CFG_IOA_PORT) {
2005 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2006 path_status_desc[j].desc, path_type_desc[i].desc,
2007 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2008 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2009 } else {
2010 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2011 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2012 path_status_desc[j].desc, path_type_desc[i].desc,
2013 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2014 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2015 } else if (cfg->cascaded_expander == 0xff) {
2016 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2017 "WWN=%08X%08X\n", path_status_desc[j].desc,
2018 path_type_desc[i].desc, cfg->phy,
2019 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2020 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2021 } else if (cfg->phy == 0xff) {
2022 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2023 "WWN=%08X%08X\n", path_status_desc[j].desc,
2024 path_type_desc[i].desc, cfg->cascaded_expander,
2025 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2026 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2027 } else {
2028 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2029 "WWN=%08X%08X\n", path_status_desc[j].desc,
2030 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2031 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2032 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2033 }
2034 }
2035 return;
2036 }
2037 }
2038
2039 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2040 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2041 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2042 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2043}
2044
4565e370
WB
2045/**
2046 * ipr_log64_path_elem - Log a fabric path element.
2047 * @hostrcb: hostrcb struct
2048 * @cfg: fabric path element struct
2049 *
2050 * Return value:
2051 * none
2052 **/
2053static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2054 struct ipr_hostrcb64_config_element *cfg)
2055{
2056 int i, j;
2057 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2058 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2059 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2060 char buffer[IPR_MAX_RES_PATH_LENGTH];
2061
2062 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2063 return;
2064
2065 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2066 if (path_type_desc[i].type != type)
2067 continue;
2068
2069 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2070 if (path_status_desc[j].status != status)
2071 continue;
2072
2073 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2074 path_status_desc[j].desc, path_type_desc[i].desc,
5adcbeb3
WB
2075 ipr_format_res_path(cfg->res_path, buffer,
2076 sizeof(buffer)),
4565e370
WB
2077 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2078 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2079 return;
2080 }
2081 }
2082 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2083 "WWN=%08X%08X\n", cfg->type_status,
5adcbeb3 2084 ipr_format_res_path(cfg->res_path, buffer, sizeof(buffer)),
4565e370
WB
2085 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2086 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2087}
2088
49dc6a18
BK
2089/**
2090 * ipr_log_fabric_error - Log a fabric error.
2091 * @ioa_cfg: ioa config struct
2092 * @hostrcb: hostrcb struct
2093 *
2094 * Return value:
2095 * none
2096 **/
2097static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2098 struct ipr_hostrcb *hostrcb)
2099{
2100 struct ipr_hostrcb_type_20_error *error;
2101 struct ipr_hostrcb_fabric_desc *fabric;
2102 struct ipr_hostrcb_config_element *cfg;
2103 int i, add_len;
2104
2105 error = &hostrcb->hcam.u.error.u.type_20_error;
2106 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2107 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2108
2109 add_len = be32_to_cpu(hostrcb->hcam.length) -
2110 (offsetof(struct ipr_hostrcb_error, u) +
2111 offsetof(struct ipr_hostrcb_type_20_error, desc));
2112
2113 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2114 ipr_log_fabric_path(hostrcb, fabric);
2115 for_each_fabric_cfg(fabric, cfg)
2116 ipr_log_path_elem(hostrcb, cfg);
2117
2118 add_len -= be16_to_cpu(fabric->length);
2119 fabric = (struct ipr_hostrcb_fabric_desc *)
2120 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2121 }
2122
ac719aba 2123 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
49dc6a18
BK
2124}
2125
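/*
 * Worked example (lengths illustrative): fabric descriptors are
 * variable length, so the loop above advances by each descriptor's own
 * length field while decrementing add_len. An HCAM carrying two
 * descriptors of lengths 0x18 and 0x20 with add_len = 0x40 would leave
 * 0x40 - 0x38 = 0x08 trailing bytes for ipr_log_hex_data() to dump.
 */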
4565e370
WB
2126/**
2127 * ipr_log_sis64_array_error - Log a sis64 array error.
2128 * @ioa_cfg: ioa config struct
2129 * @hostrcb: hostrcb struct
2130 *
2131 * Return value:
2132 * none
2133 **/
2134static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2135 struct ipr_hostrcb *hostrcb)
2136{
2137 int i, num_entries;
2138 struct ipr_hostrcb_type_24_error *error;
2139 struct ipr_hostrcb64_array_data_entry *array_entry;
2140 char buffer[IPR_MAX_RES_PATH_LENGTH];
2141 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2142
2143 error = &hostrcb->hcam.u.error64.u.type_24_error;
2144
2145 ipr_err_separator;
2146
2147 ipr_err("RAID %s Array Configuration: %s\n",
2148 error->protection_level,
5adcbeb3 2149 ipr_format_res_path(error->last_res_path, buffer, sizeof(buffer)));
4565e370
WB
2150
2151 ipr_err_separator;
2152
2153 array_entry = error->array_member;
7262026f
WB
2154 num_entries = min_t(u32, error->num_entries,
2155 ARRAY_SIZE(error->array_member));
4565e370
WB
2156
2157 for (i = 0; i < num_entries; i++, array_entry++) {
2158
2159 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2160 continue;
2161
2162 if (error->exposed_mode_adn == i)
2163 ipr_err("Exposed Array Member %d:\n", i);
2164 else
2165 ipr_err("Array Member %d:\n", i);
2166
2168 ipr_log_ext_vpd(&array_entry->vpd);
7262026f 2169 ipr_err("Current Location: %s\n",
5adcbeb3
WB
2170 ipr_format_res_path(array_entry->res_path, buffer,
2171 sizeof(buffer)));
7262026f 2172 ipr_err("Expected Location: %s\n",
5adcbeb3
WB
2173 ipr_format_res_path(array_entry->expected_res_path,
2174 buffer, sizeof(buffer)));
4565e370
WB
2175
2176 ipr_err_separator;
2177 }
2178}
2179
2180/**
2181 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2182 * @ioa_cfg: ioa config struct
2183 * @hostrcb: hostrcb struct
2184 *
2185 * Return value:
2186 * none
2187 **/
2188static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2189 struct ipr_hostrcb *hostrcb)
2190{
2191 struct ipr_hostrcb_type_30_error *error;
2192 struct ipr_hostrcb64_fabric_desc *fabric;
2193 struct ipr_hostrcb64_config_element *cfg;
2194 int i, add_len;
2195
2196 error = &hostrcb->hcam.u.error64.u.type_30_error;
2197
2198 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2199 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2200
2201 add_len = be32_to_cpu(hostrcb->hcam.length) -
2202 (offsetof(struct ipr_hostrcb64_error, u) +
2203 offsetof(struct ipr_hostrcb_type_30_error, desc));
2204
2205 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2206 ipr_log64_fabric_path(hostrcb, fabric);
2207 for_each_fabric_cfg(fabric, cfg)
2208 ipr_log64_path_elem(hostrcb, cfg);
2209
2210 add_len -= be16_to_cpu(fabric->length);
2211 fabric = (struct ipr_hostrcb64_fabric_desc *)
2212 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2213 }
2214
2215 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2216}
2217
b0df54bb 2218/**
2219 * ipr_log_generic_error - Log an adapter error.
2220 * @ioa_cfg: ioa config struct
2221 * @hostrcb: hostrcb struct
2222 *
2223 * Return value:
2224 * none
2225 **/
2226static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2227 struct ipr_hostrcb *hostrcb)
2228{
ac719aba 2229 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
b0df54bb 2230 be32_to_cpu(hostrcb->hcam.length));
2231}
2232
1da177e4
LT
2233/**
2234 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2235 * @ioasc: IOASC
2236 *
2237 * This function will return the index into the ipr_error_table
2238 * for the specified IOASC. If the IOASC is not in the table,
2239 * 0 will be returned, which points to the entry used for unknown errors.
2240 *
2241 * Return value:
2242 * index into the ipr_error_table
2243 **/
2244static u32 ipr_get_error(u32 ioasc)
2245{
2246 int i;
2247
2248 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
35a39691 2249 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
1da177e4
LT
2250 return i;
2251
2252 return 0;
2253}
2254
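/*
 * Usage sketch (illustrative only, not part of the driver): the IOASC
 * is masked with IPR_IOASC_IOASC_MASK before comparison, so related
 * IOASC variants share one table entry, and index 0 doubles as the
 * catch-all for unknown errors.
 */
#if 0	/* example, compiled out */
static void example_ioasc_lookup(u32 ioasc)
{
	u32 index = ipr_get_error(ioasc);

	/* index is 0 (the "unknown error" entry) when nothing matches */
	printk(KERN_INFO "ipr: %s\n", ipr_error_table[index].error);
}
#endif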
2255/**
2256 * ipr_handle_log_data - Log an adapter error.
2257 * @ioa_cfg: ioa config struct
2258 * @hostrcb: hostrcb struct
2259 *
2260 * This function logs an adapter error to the system.
2261 *
2262 * Return value:
2263 * none
2264 **/
2265static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2266 struct ipr_hostrcb *hostrcb)
2267{
2268 u32 ioasc;
2269 int error_index;
2270
2271 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2272 return;
2273
2274 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2275 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2276
4565e370
WB
2277 if (ioa_cfg->sis64)
2278 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2279 else
2280 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
1da177e4 2281
4565e370
WB
2282 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2283 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
1da177e4
LT
2284 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2285 scsi_report_bus_reset(ioa_cfg->host,
4565e370 2286 hostrcb->hcam.u.error.fd_res_addr.bus);
1da177e4
LT
2287 }
2288
2289 error_index = ipr_get_error(ioasc);
2290
2291 if (!ipr_error_table[error_index].log_hcam)
2292 return;
2293
49dc6a18 2294 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
1da177e4
LT
2295
2296 /* Set indication we have logged an error */
2297 ioa_cfg->errors_logged++;
2298
933916f3 2299 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
1da177e4 2300 return;
cf852037 2301 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2302 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
1da177e4
LT
2303
2304 switch (hostrcb->hcam.overlay_id) {
1da177e4
LT
2305 case IPR_HOST_RCB_OVERLAY_ID_2:
2306 ipr_log_cache_error(ioa_cfg, hostrcb);
2307 break;
2308 case IPR_HOST_RCB_OVERLAY_ID_3:
2309 ipr_log_config_error(ioa_cfg, hostrcb);
2310 break;
2311 case IPR_HOST_RCB_OVERLAY_ID_4:
2312 case IPR_HOST_RCB_OVERLAY_ID_6:
2313 ipr_log_array_error(ioa_cfg, hostrcb);
2314 break;
b0df54bb 2315 case IPR_HOST_RCB_OVERLAY_ID_7:
2316 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2317 break;
ee0f05b8 2318 case IPR_HOST_RCB_OVERLAY_ID_12:
2319 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2320 break;
2321 case IPR_HOST_RCB_OVERLAY_ID_13:
2322 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2323 break;
2324 case IPR_HOST_RCB_OVERLAY_ID_14:
2325 case IPR_HOST_RCB_OVERLAY_ID_16:
2326 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2327 break;
2328 case IPR_HOST_RCB_OVERLAY_ID_17:
2329 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2330 break;
49dc6a18
BK
2331 case IPR_HOST_RCB_OVERLAY_ID_20:
2332 ipr_log_fabric_error(ioa_cfg, hostrcb);
2333 break;
4565e370
WB
2334 case IPR_HOST_RCB_OVERLAY_ID_23:
2335 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2336 break;
2337 case IPR_HOST_RCB_OVERLAY_ID_24:
2338 case IPR_HOST_RCB_OVERLAY_ID_26:
2339 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2340 break;
2341 case IPR_HOST_RCB_OVERLAY_ID_30:
2342 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2343 break;
cf852037 2344 case IPR_HOST_RCB_OVERLAY_ID_1:
1da177e4 2345 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1da177e4 2346 default:
a9cfca96 2347 ipr_log_generic_error(ioa_cfg, hostrcb);
1da177e4
LT
2348 break;
2349 }
2350}
2351
2352/**
2353 * ipr_process_error - Op done function for an adapter error log.
2354 * @ipr_cmd: ipr command struct
2355 *
 2356 * This function is the op done function for an error log host
 2357 * controlled async message (HCAM) from the adapter. It will log
 2358 * the error and send the HCAM back to the adapter.
2359 *
2360 * Return value:
2361 * none
2362 **/
2363static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2364{
2365 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2366 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
96d21f00 2367 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4565e370
WB
2368 u32 fd_ioasc;
2369
2370 if (ioa_cfg->sis64)
2371 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2372 else
2373 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
1da177e4
LT
2374
2375 list_del(&hostrcb->queue);
2376 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
2377
2378 if (!ioasc) {
2379 ipr_handle_log_data(ioa_cfg, hostrcb);
65f56475
BK
2380 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2381 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
1da177e4
LT
2382 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2383 dev_err(&ioa_cfg->pdev->dev,
2384 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2385 }
2386
2387 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2388}
2389
2390/**
2391 * ipr_timeout - An internally generated op has timed out.
2392 * @ipr_cmd: ipr command struct
2393 *
2394 * This function blocks host requests and initiates an
2395 * adapter reset.
2396 *
2397 * Return value:
2398 * none
2399 **/
2400static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2401{
2402 unsigned long lock_flags = 0;
2403 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2404
2405 ENTER;
2406 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2407
2408 ioa_cfg->errors_logged++;
2409 dev_err(&ioa_cfg->pdev->dev,
2410 "Adapter being reset due to command timeout.\n");
2411
2412 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2413 ioa_cfg->sdt_state = GET_DUMP;
2414
2415 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2416 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2417
2418 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2419 LEAVE;
2420}
2421
2422/**
2423 * ipr_oper_timeout - Adapter timed out transitioning to operational
2424 * @ipr_cmd: ipr command struct
2425 *
2426 * This function blocks host requests and initiates an
2427 * adapter reset.
2428 *
2429 * Return value:
2430 * none
2431 **/
2432static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2433{
2434 unsigned long lock_flags = 0;
2435 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2436
2437 ENTER;
2438 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2439
2440 ioa_cfg->errors_logged++;
2441 dev_err(&ioa_cfg->pdev->dev,
2442 "Adapter timed out transitioning to operational.\n");
2443
2444 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2445 ioa_cfg->sdt_state = GET_DUMP;
2446
2447 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2448 if (ipr_fastfail)
2449 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2450 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2451 }
2452
2453 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2454 LEAVE;
2455}
2456
2457/**
2458 * ipr_reset_reload - Reset/Reload the IOA
2459 * @ioa_cfg: ioa config struct
2460 * @shutdown_type: shutdown type
2461 *
2462 * This function resets the adapter and re-initializes it.
2463 * This function assumes that all new host commands have been stopped.
2464 * Return value:
2465 * SUCCESS / FAILED
2466 **/
2467static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
2468 enum ipr_shutdown_type shutdown_type)
2469{
2470 if (!ioa_cfg->in_reset_reload)
2471 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
2472
2473 spin_unlock_irq(ioa_cfg->host->host_lock);
2474 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2475 spin_lock_irq(ioa_cfg->host->host_lock);
2476
 2477 /* If we got hit with a host reset while we were already resetting
 2478 the adapter for some reason, and the reset failed, fail the reset request. */
2479 if (ioa_cfg->ioa_is_dead) {
2480 ipr_trace;
2481 return FAILED;
2482 }
2483
2484 return SUCCESS;
2485}
2486
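/*
 * Note: ipr_reset_reload() is entered with the host lock held; it drops
 * the lock around wait_event() so the reset job can make progress, then
 * retakes it before checking ioa_is_dead, so the caller's locking
 * assumptions still hold on return.
 */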
2487/**
2488 * ipr_find_ses_entry - Find matching SES in SES table
2489 * @res: resource entry struct of SES
2490 *
2491 * Return value:
2492 * pointer to SES table entry / NULL on failure
2493 **/
2494static const struct ipr_ses_table_entry *
2495ipr_find_ses_entry(struct ipr_resource_entry *res)
2496{
2497 int i, j, matches;
3e7ebdfa 2498 struct ipr_std_inq_vpids *vpids;
1da177e4
LT
2499 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2500
2501 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2502 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2503 if (ste->compare_product_id_byte[j] == 'X') {
3e7ebdfa
WB
2504 vpids = &res->std_inq_data.vpids;
2505 if (vpids->product_id[j] == ste->product_id[j])
1da177e4
LT
2506 matches++;
2507 else
2508 break;
2509 } else
2510 matches++;
2511 }
2512
2513 if (matches == IPR_PROD_ID_LEN)
2514 return ste;
2515 }
2516
2517 return NULL;
2518}
2519
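/*
 * Note on the matching loop above: compare_product_id_byte[] acts as a
 * per-byte opt-in mask. An 'X' means "this product_id byte must match
 * the inquiry data"; any other mask byte makes that position a wildcard
 * that counts as a match unconditionally. An entry matches only when
 * all IPR_PROD_ID_LEN positions end up counted as matches.
 */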
2520/**
2521 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2522 * @ioa_cfg: ioa config struct
2523 * @bus: SCSI bus
2524 * @bus_width: bus width
2525 *
2526 * Return value:
2527 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 2528 * For a 2-byte wide SCSI bus, the maximum throughput is
 2529 * twice the maximum transfer rate (e.g. for a wide-enabled bus,
 2530 * a max of 160MHz yields a max of 320MB/sec).
2531 **/
2532static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2533{
2534 struct ipr_resource_entry *res;
2535 const struct ipr_ses_table_entry *ste;
2536 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2537
2538 /* Loop through each config table entry in the config table buffer */
2539 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa 2540 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
1da177e4
LT
2541 continue;
2542
3e7ebdfa 2543 if (bus != res->bus)
1da177e4
LT
2544 continue;
2545
2546 if (!(ste = ipr_find_ses_entry(res)))
2547 continue;
2548
2549 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2550 }
2551
2552 return max_xfer_rate;
2553}
2554
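/*
 * Worked example (figures illustrative, assuming max_bus_speed_limit is
 * in MB/sec as the doc comment above implies): for a 16-bit (2-byte)
 * wide bus and a SES table entry limiting the bus to 320 MB/sec,
 * (320 * 10) / (16 / 8) = 1600, i.e. 160 MHz in the 100 kHz units this
 * function returns -- consistent with wide SCSI moving two bytes per
 * transfer.
 */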
2555/**
2556 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2557 * @ioa_cfg: ioa config struct
2558 * @max_delay: max delay in micro-seconds to wait
2559 *
 2560 * Waits for an IODEBUG ACK from the IOA by busy-waiting.
2561 *
2562 * Return value:
2563 * 0 on success / other on failure
2564 **/
2565static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2566{
2567 volatile u32 pcii_reg;
2568 int delay = 1;
2569
2570 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2571 while (delay < max_delay) {
2572 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2573
2574 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2575 return 0;
2576
2577 /* udelay cannot be used if delay is more than a few milliseconds */
2578 if ((delay / 1000) > MAX_UDELAY_MS)
2579 mdelay(delay / 1000);
2580 else
2581 udelay(delay);
2582
2583 delay += delay;
2584 }
2585 return -EIO;
2586}
2587
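/*
 * Note: the delay in ipr_wait_iodbg_ack() doubles on every pass (1, 2,
 * 4, ... microseconds), so the total time busy-waited before giving up
 * is bounded by roughly 2 * max_delay, and long waits are routed to
 * mdelay() once a single step exceeds MAX_UDELAY_MS milliseconds.
 */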
dcbad00e
WB
2588/**
2589 * ipr_get_sis64_dump_data_section - Dump IOA memory
2590 * @ioa_cfg: ioa config struct
2591 * @start_addr: adapter address to dump
2592 * @dest: destination kernel buffer
2593 * @length_in_words: length to dump in 4 byte words
2594 *
2595 * Return value:
2596 * 0 on success
2597 **/
2598static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2599 u32 start_addr,
2600 __be32 *dest, u32 length_in_words)
2601{
2602 int i;
2603
2604 for (i = 0; i < length_in_words; i++) {
2605 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2606 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2607 dest++;
2608 }
2609
2610 return 0;
2611}
2612
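/*
 * Note: sis64 dumps use an indirect register pair -- each 32-bit word is
 * fetched by writing its adapter address to dump_addr_reg and reading
 * the value back from dump_data_reg -- so no mailbox/IO-debug handshake
 * is needed, in contrast to ipr_get_ldump_data_section() below.
 */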
1da177e4
LT
2613/**
2614 * ipr_get_ldump_data_section - Dump IOA memory
2615 * @ioa_cfg: ioa config struct
2616 * @start_addr: adapter address to dump
2617 * @dest: destination kernel buffer
2618 * @length_in_words: length to dump in 4 byte words
2619 *
2620 * Return value:
2621 * 0 on success / -EIO on failure
2622 **/
2623static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2624 u32 start_addr,
2625 __be32 *dest, u32 length_in_words)
2626{
2627 volatile u32 temp_pcii_reg;
2628 int i, delay = 0;
2629
dcbad00e
WB
2630 if (ioa_cfg->sis64)
2631 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2632 dest, length_in_words);
2633
1da177e4
LT
2634 /* Write IOA interrupt reg starting LDUMP state */
2635 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
214777ba 2636 ioa_cfg->regs.set_uproc_interrupt_reg32);
1da177e4
LT
2637
2638 /* Wait for IO debug acknowledge */
2639 if (ipr_wait_iodbg_ack(ioa_cfg,
2640 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2641 dev_err(&ioa_cfg->pdev->dev,
2642 "IOA dump long data transfer timeout\n");
2643 return -EIO;
2644 }
2645
2646 /* Signal LDUMP interlocked - clear IO debug ack */
2647 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2648 ioa_cfg->regs.clr_interrupt_reg);
2649
2650 /* Write Mailbox with starting address */
2651 writel(start_addr, ioa_cfg->ioa_mailbox);
2652
2653 /* Signal address valid - clear IOA Reset alert */
2654 writel(IPR_UPROCI_RESET_ALERT,
214777ba 2655 ioa_cfg->regs.clr_uproc_interrupt_reg32);
1da177e4
LT
2656
2657 for (i = 0; i < length_in_words; i++) {
2658 /* Wait for IO debug acknowledge */
2659 if (ipr_wait_iodbg_ack(ioa_cfg,
2660 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2661 dev_err(&ioa_cfg->pdev->dev,
2662 "IOA dump short data transfer timeout\n");
2663 return -EIO;
2664 }
2665
2666 /* Read data from mailbox and increment destination pointer */
2667 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2668 dest++;
2669
2670 /* For all but the last word of data, signal data received */
2671 if (i < (length_in_words - 1)) {
2672 /* Signal dump data received - Clear IO debug Ack */
2673 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2674 ioa_cfg->regs.clr_interrupt_reg);
2675 }
2676 }
2677
2678 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2679 writel(IPR_UPROCI_RESET_ALERT,
214777ba 2680 ioa_cfg->regs.set_uproc_interrupt_reg32);
1da177e4
LT
2681
2682 writel(IPR_UPROCI_IO_DEBUG_ALERT,
214777ba 2683 ioa_cfg->regs.clr_uproc_interrupt_reg32);
1da177e4
LT
2684
2685 /* Signal dump data received - Clear IO debug Ack */
2686 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2687 ioa_cfg->regs.clr_interrupt_reg);
2688
2689 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2690 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2691 temp_pcii_reg =
214777ba 2692 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
1da177e4
LT
2693
2694 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2695 return 0;
2696
2697 udelay(10);
2698 delay += 10;
2699 }
2700
2701 return 0;
2702}
2703
2704#ifdef CONFIG_SCSI_IPR_DUMP
2705/**
2706 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2707 * @ioa_cfg: ioa config struct
2708 * @pci_address: adapter address
2709 * @length: length of data to copy
2710 *
2711 * Copy data from PCI adapter to kernel buffer.
2712 * Note: length MUST be a 4 byte multiple
2713 * Return value:
2714 * 0 on success / other on failure
2715 **/
2716static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2717 unsigned long pci_address, u32 length)
2718{
2719 int bytes_copied = 0;
4d4dd706 2720 int cur_len, rc, rem_len, rem_page_len, max_dump_size;
1da177e4
LT
2721 __be32 *page;
2722 unsigned long lock_flags = 0;
2723 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2724
4d4dd706
KSS
2725 if (ioa_cfg->sis64)
2726 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2727 else
2728 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2729
1da177e4 2730 while (bytes_copied < length &&
4d4dd706 2731 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
1da177e4
LT
2732 if (ioa_dump->page_offset >= PAGE_SIZE ||
2733 ioa_dump->page_offset == 0) {
2734 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2735
2736 if (!page) {
2737 ipr_trace;
2738 return bytes_copied;
2739 }
2740
2741 ioa_dump->page_offset = 0;
2742 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2743 ioa_dump->next_page_index++;
2744 } else
2745 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2746
2747 rem_len = length - bytes_copied;
2748 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2749 cur_len = min(rem_len, rem_page_len);
2750
2751 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2752 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2753 rc = -EIO;
2754 } else {
2755 rc = ipr_get_ldump_data_section(ioa_cfg,
2756 pci_address + bytes_copied,
2757 &page[ioa_dump->page_offset / 4],
2758 (cur_len / sizeof(u32)));
2759 }
2760 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2761
2762 if (!rc) {
2763 ioa_dump->page_offset += cur_len;
2764 bytes_copied += cur_len;
2765 } else {
2766 ipr_trace;
2767 break;
2768 }
2769 schedule();
2770 }
2771
2772 return bytes_copied;
2773}
2774
2775/**
2776 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2777 * @hdr: dump entry header struct
2778 *
2779 * Return value:
2780 * nothing
2781 **/
2782static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2783{
2784 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2785 hdr->num_elems = 1;
2786 hdr->offset = sizeof(*hdr);
2787 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2788}
2789
2790/**
2791 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2792 * @ioa_cfg: ioa config struct
2793 * @driver_dump: driver dump struct
2794 *
2795 * Return value:
2796 * nothing
2797 **/
2798static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2799 struct ipr_driver_dump *driver_dump)
2800{
2801 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2802
2803 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2804 driver_dump->ioa_type_entry.hdr.len =
2805 sizeof(struct ipr_dump_ioa_type_entry) -
2806 sizeof(struct ipr_dump_entry_header);
2807 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2808 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2809 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2810 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2811 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2812 ucode_vpd->minor_release[1];
2813 driver_dump->hdr.num_entries++;
2814}
2815
2816/**
2817 * ipr_dump_version_data - Fill in the driver version in the dump.
2818 * @ioa_cfg: ioa config struct
2819 * @driver_dump: driver dump struct
2820 *
2821 * Return value:
2822 * nothing
2823 **/
2824static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2825 struct ipr_driver_dump *driver_dump)
2826{
2827 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2828 driver_dump->version_entry.hdr.len =
2829 sizeof(struct ipr_dump_version_entry) -
2830 sizeof(struct ipr_dump_entry_header);
2831 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2832 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2833 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2834 driver_dump->hdr.num_entries++;
2835}
2836
2837/**
2838 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2839 * @ioa_cfg: ioa config struct
2840 * @driver_dump: driver dump struct
2841 *
2842 * Return value:
2843 * nothing
2844 **/
2845static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2846 struct ipr_driver_dump *driver_dump)
2847{
2848 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2849 driver_dump->trace_entry.hdr.len =
2850 sizeof(struct ipr_dump_trace_entry) -
2851 sizeof(struct ipr_dump_entry_header);
2852 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2853 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2854 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2855 driver_dump->hdr.num_entries++;
2856}
2857
2858/**
2859 * ipr_dump_location_data - Fill in the IOA location in the dump.
2860 * @ioa_cfg: ioa config struct
2861 * @driver_dump: driver dump struct
2862 *
2863 * Return value:
2864 * nothing
2865 **/
2866static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2867 struct ipr_driver_dump *driver_dump)
2868{
2869 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2870 driver_dump->location_entry.hdr.len =
2871 sizeof(struct ipr_dump_location_entry) -
2872 sizeof(struct ipr_dump_entry_header);
2873 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2874 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
71610f55 2875 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
1da177e4
LT
2876 driver_dump->hdr.num_entries++;
2877}
2878
2879/**
2880 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2881 * @ioa_cfg: ioa config struct
2882 * @dump: dump struct
2883 *
2884 * Return value:
2885 * nothing
2886 **/
2887static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2888{
2889 unsigned long start_addr, sdt_word;
2890 unsigned long lock_flags = 0;
2891 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2892 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
4d4dd706
KSS
2893 u32 num_entries, max_num_entries, start_off, end_off;
2894 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
1da177e4 2895 struct ipr_sdt *sdt;
dcbad00e 2896 int valid = 1;
1da177e4
LT
2897 int i;
2898
2899 ENTER;
2900
2901 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2902
41e9a696 2903 if (ioa_cfg->sdt_state != READ_DUMP) {
1da177e4
LT
2904 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2905 return;
2906 }
2907
110def85
WB
2908 if (ioa_cfg->sis64) {
2909 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2910 ssleep(IPR_DUMP_DELAY_SECONDS);
2911 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2912 }
2913
1da177e4
LT
2914 start_addr = readl(ioa_cfg->ioa_mailbox);
2915
dcbad00e 2916 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
1da177e4
LT
2917 dev_err(&ioa_cfg->pdev->dev,
2918 "Invalid dump table format: %lx\n", start_addr);
2919 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2920 return;
2921 }
2922
2923 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2924
2925 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2926
2927 /* Initialize the overall dump header */
2928 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
2929 driver_dump->hdr.num_entries = 1;
2930 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
2931 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
2932 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
2933 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
2934
2935 ipr_dump_version_data(ioa_cfg, driver_dump);
2936 ipr_dump_location_data(ioa_cfg, driver_dump);
2937 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
2938 ipr_dump_trace_data(ioa_cfg, driver_dump);
2939
2940 /* Update dump_header */
2941 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
2942
2943 /* IOA Dump entry */
2944 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1da177e4
LT
2945 ioa_dump->hdr.len = 0;
2946 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2947 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
2948
 2949 /* The first entries in the sdt are actually a list of dump addresses and
 2950 lengths to gather the real dump data. sdt points to the IOA-generated
 2951 dump table. Dump data will be extracted based on entries in this
 2952 table */
2953 sdt = &ioa_dump->sdt;
2954
4d4dd706
KSS
2955 if (ioa_cfg->sis64) {
2956 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
2957 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2958 } else {
2959 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
2960 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2961 }
2962
2963 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
2964 (max_num_entries * sizeof(struct ipr_sdt_entry));
1da177e4 2965 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
4d4dd706 2966 bytes_to_copy / sizeof(__be32));
1da177e4
LT
2967
2968 /* Smart Dump table is ready to use and the first entry is valid */
dcbad00e
WB
2969 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
2970 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
1da177e4
LT
2971 dev_err(&ioa_cfg->pdev->dev,
2972 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
2973 rc, be32_to_cpu(sdt->hdr.state));
2974 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
2975 ioa_cfg->sdt_state = DUMP_OBTAINED;
2976 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2977 return;
2978 }
2979
2980 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
2981
4d4dd706
KSS
2982 if (num_entries > max_num_entries)
2983 num_entries = max_num_entries;
2984
2985 /* Update dump length to the actual data to be copied */
2986 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
2987 if (ioa_cfg->sis64)
2988 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
2989 else
2990 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
1da177e4
LT
2991
2992 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2993
2994 for (i = 0; i < num_entries; i++) {
4d4dd706 2995 if (ioa_dump->hdr.len > max_dump_size) {
1da177e4
LT
2996 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2997 break;
2998 }
2999
3000 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
dcbad00e
WB
3001 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3002 if (ioa_cfg->sis64)
3003 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3004 else {
3005 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3006 end_off = be32_to_cpu(sdt->entry[i].end_token);
3007
3008 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3009 bytes_to_copy = end_off - start_off;
3010 else
3011 valid = 0;
3012 }
3013 if (valid) {
4d4dd706 3014 if (bytes_to_copy > max_dump_size) {
1da177e4
LT
3015 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3016 continue;
3017 }
3018
3019 /* Copy data from adapter to driver buffers */
3020 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3021 bytes_to_copy);
3022
3023 ioa_dump->hdr.len += bytes_copied;
3024
3025 if (bytes_copied != bytes_to_copy) {
3026 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3027 break;
3028 }
3029 }
3030 }
3031 }
3032
3033 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3034
3035 /* Update dump_header */
3036 driver_dump->hdr.len += ioa_dump->hdr.len;
3037 wmb();
3038 ioa_cfg->sdt_state = DUMP_OBTAINED;
3039 LEAVE;
3040}
3041
3042#else
 3043#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3044#endif
3045
3046/**
3047 * ipr_release_dump - Free adapter dump memory
3048 * @kref: kref struct
3049 *
3050 * Return value:
3051 * nothing
3052 **/
3053static void ipr_release_dump(struct kref *kref)
3054{
3055 struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
3056 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3057 unsigned long lock_flags = 0;
3058 int i;
3059
3060 ENTER;
3061 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3062 ioa_cfg->dump = NULL;
3063 ioa_cfg->sdt_state = INACTIVE;
3064 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3065
3066 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3067 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3068
4d4dd706 3069 vfree(dump->ioa_dump.ioa_data);
1da177e4
LT
3070 kfree(dump);
3071 LEAVE;
3072}
3073
3074/**
3075 * ipr_worker_thread - Worker thread
c4028958 3076 * @work: work struct (embedded in the ioa config struct)
1da177e4
LT
3077 *
3078 * Called at task level from a work thread. This function takes care
 3079 * of adding and removing devices from the mid-layer as configuration
3080 * changes are detected by the adapter.
3081 *
3082 * Return value:
3083 * nothing
3084 **/
c4028958 3085static void ipr_worker_thread(struct work_struct *work)
1da177e4
LT
3086{
3087 unsigned long lock_flags;
3088 struct ipr_resource_entry *res;
3089 struct scsi_device *sdev;
3090 struct ipr_dump *dump;
c4028958
DH
3091 struct ipr_ioa_cfg *ioa_cfg =
3092 container_of(work, struct ipr_ioa_cfg, work_q);
1da177e4
LT
3093 u8 bus, target, lun;
3094 int did_work;
3095
3096 ENTER;
3097 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3098
41e9a696 3099 if (ioa_cfg->sdt_state == READ_DUMP) {
1da177e4
LT
3100 dump = ioa_cfg->dump;
3101 if (!dump) {
3102 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3103 return;
3104 }
3105 kref_get(&dump->kref);
3106 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3107 ipr_get_ioa_dump(ioa_cfg, dump);
3108 kref_put(&dump->kref, ipr_release_dump);
3109
3110 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4c647e90 3111 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
1da177e4
LT
3112 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3113 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3114 return;
3115 }
3116
3117restart:
3118 do {
3119 did_work = 0;
3120 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
3121 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3122 return;
3123 }
3124
3125 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3126 if (res->del_from_ml && res->sdev) {
3127 did_work = 1;
3128 sdev = res->sdev;
3129 if (!scsi_device_get(sdev)) {
5767a1c4
KSS
3130 if (!res->add_to_ml)
3131 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3132 else
3133 res->del_from_ml = 0;
1da177e4
LT
3134 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3135 scsi_remove_device(sdev);
3136 scsi_device_put(sdev);
3137 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3138 }
3139 break;
3140 }
3141 }
 3142 } while (did_work);
3143
3144 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3145 if (res->add_to_ml) {
3e7ebdfa
WB
3146 bus = res->bus;
3147 target = res->target;
3148 lun = res->lun;
1121b794 3149 res->add_to_ml = 0;
1da177e4
LT
3150 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3151 scsi_add_device(ioa_cfg->host, bus, target, lun);
3152 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3153 goto restart;
3154 }
3155 }
3156
3157 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ee959b00 3158 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
1da177e4
LT
3159 LEAVE;
3160}
3161
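/*
 * Note on the restart label above: scsi_add_device() must be called
 * with the host lock dropped, and the resource list can change while
 * the lock is released, so the worker rescans used_res_q from the top
 * after every addition instead of continuing a possibly stale
 * iteration.
 */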
3162#ifdef CONFIG_SCSI_IPR_TRACE
3163/**
3164 * ipr_read_trace - Dump the adapter trace
2c3c8bea 3165 * @filp: open sysfs file
1da177e4 3166 * @kobj: kobject struct
91a69029 3167 * @bin_attr: bin_attribute struct
1da177e4
LT
3168 * @buf: buffer
3169 * @off: offset
3170 * @count: buffer size
3171 *
3172 * Return value:
3173 * number of bytes printed to buffer
3174 **/
2c3c8bea 3175static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
91a69029
ZR
3176 struct bin_attribute *bin_attr,
3177 char *buf, loff_t off, size_t count)
1da177e4 3178{
ee959b00
TJ
3179 struct device *dev = container_of(kobj, struct device, kobj);
3180 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3181 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3182 unsigned long lock_flags = 0;
d777aaf3 3183 ssize_t ret;
1da177e4
LT
3184
3185 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
d777aaf3
AM
3186 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3187 IPR_TRACE_SIZE);
1da177e4 3188 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
d777aaf3
AM
3189
3190 return ret;
1da177e4
LT
3191}
3192
3193static struct bin_attribute ipr_trace_attr = {
3194 .attr = {
3195 .name = "trace",
3196 .mode = S_IRUGO,
3197 },
3198 .size = 0,
3199 .read = ipr_read_trace,
3200};
3201#endif
3202
3203/**
3204 * ipr_show_fw_version - Show the firmware version
ee959b00
TJ
3205 * @dev: class device struct
3206 * @buf: buffer
1da177e4
LT
3207 *
3208 * Return value:
3209 * number of bytes printed to buffer
3210 **/
ee959b00
TJ
3211static ssize_t ipr_show_fw_version(struct device *dev,
3212 struct device_attribute *attr, char *buf)
1da177e4 3213{
ee959b00 3214 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3215 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3216 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3217 unsigned long lock_flags = 0;
3218 int len;
3219
3220 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3221 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3222 ucode_vpd->major_release, ucode_vpd->card_type,
3223 ucode_vpd->minor_release[0],
3224 ucode_vpd->minor_release[1]);
3225 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3226 return len;
3227}
3228
ee959b00 3229static struct device_attribute ipr_fw_version_attr = {
1da177e4
LT
3230 .attr = {
3231 .name = "fw_version",
3232 .mode = S_IRUGO,
3233 },
3234 .show = ipr_show_fw_version,
3235};
3236
3237/**
3238 * ipr_show_log_level - Show the adapter's error logging level
ee959b00
TJ
3239 * @dev: class device struct
3240 * @buf: buffer
1da177e4
LT
3241 *
3242 * Return value:
3243 * number of bytes printed to buffer
3244 **/
ee959b00
TJ
3245static ssize_t ipr_show_log_level(struct device *dev,
3246 struct device_attribute *attr, char *buf)
1da177e4 3247{
ee959b00 3248 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3249 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3250 unsigned long lock_flags = 0;
3251 int len;
3252
3253 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3254 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3255 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3256 return len;
3257}
3258
3259/**
3260 * ipr_store_log_level - Change the adapter's error logging level
ee959b00
TJ
3261 * @dev: class device struct
3262 * @buf: buffer
1da177e4
LT
3263 *
3264 * Return value:
3265 * number of bytes consumed from buffer
3266 **/
ee959b00
TJ
3267static ssize_t ipr_store_log_level(struct device *dev,
3268 struct device_attribute *attr,
1da177e4
LT
3269 const char *buf, size_t count)
3270{
ee959b00 3271 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3272 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3273 unsigned long lock_flags = 0;
3274
3275 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3276 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3277 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3278 return strlen(buf);
3279}
3280
ee959b00 3281static struct device_attribute ipr_log_level_attr = {
1da177e4
LT
3282 .attr = {
3283 .name = "log_level",
3284 .mode = S_IRUGO | S_IWUSR,
3285 },
3286 .show = ipr_show_log_level,
3287 .store = ipr_store_log_level
3288};
3289
3290/**
3291 * ipr_store_diagnostics - IOA Diagnostics interface
ee959b00
TJ
3292 * @dev: device struct
3293 * @buf: buffer
3294 * @count: buffer size
1da177e4
LT
3295 *
3296 * This function will reset the adapter and wait a reasonable
3297 * amount of time for any errors that the adapter might log.
3298 *
3299 * Return value:
3300 * count on success / other on failure
3301 **/
ee959b00
TJ
3302static ssize_t ipr_store_diagnostics(struct device *dev,
3303 struct device_attribute *attr,
1da177e4
LT
3304 const char *buf, size_t count)
3305{
ee959b00 3306 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3307 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3308 unsigned long lock_flags = 0;
3309 int rc = count;
3310
3311 if (!capable(CAP_SYS_ADMIN))
3312 return -EACCES;
3313
1da177e4 3314 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
970ea294
BK
3315 while (ioa_cfg->in_reset_reload) {
3316 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3317 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3318 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3319 }
3320
1da177e4
LT
3321 ioa_cfg->errors_logged = 0;
3322 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3323
3324 if (ioa_cfg->in_reset_reload) {
3325 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3326 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3327
3328 /* Wait for a second for any errors to be logged */
3329 msleep(1000);
3330 } else {
3331 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3332 return -EIO;
3333 }
3334
3335 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3336 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3337 rc = -EIO;
3338 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3339
3340 return rc;
3341}
3342
ee959b00 3343static struct device_attribute ipr_diagnostics_attr = {
1da177e4
LT
3344 .attr = {
3345 .name = "run_diagnostics",
3346 .mode = S_IWUSR,
3347 },
3348 .store = ipr_store_diagnostics
3349};
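/*
 * Usage sketch (illustrative; host number assumed): writing anything to
 * this attribute resets the IOA with a normal shutdown, waits for the
 * reload to finish plus one second of error-log settling time, and then
 * fails the write with -EIO if any errors were logged:
 *
 *	int fd = open("/sys/class/scsi_host/host0/run_diagnostics", O_WRONLY);
 *	if (write(fd, "1", 1) < 0)
 *		perror("diagnostics failed");	/* errors were logged */
 *	close(fd);
 */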
3350
f37eb54b 3351/**
3352 * ipr_show_adapter_state - Show the adapter's state
ee959b00
TJ
3353 * @dev: device struct
3354 * @buf: buffer
f37eb54b 3355 *
3356 * Return value:
3357 * number of bytes printed to buffer
3358 **/
ee959b00
TJ
3359static ssize_t ipr_show_adapter_state(struct device *dev,
3360 struct device_attribute *attr, char *buf)
f37eb54b 3361{
ee959b00 3362 struct Scsi_Host *shost = class_to_shost(dev);
f37eb54b 3363 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3364 unsigned long lock_flags = 0;
3365 int len;
3366
3367 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3368 if (ioa_cfg->ioa_is_dead)
3369 len = snprintf(buf, PAGE_SIZE, "offline\n");
3370 else
3371 len = snprintf(buf, PAGE_SIZE, "online\n");
3372 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3373 return len;
3374}
3375
3376/**
3377 * ipr_store_adapter_state - Change adapter state
ee959b00
TJ
3378 * @dev: device struct
3379 * @buf: buffer
3380 * @count: buffer size
f37eb54b 3381 *
3382 * This function will change the adapter's state.
3383 *
3384 * Return value:
3385 * count on success / other on failure
3386 **/
ee959b00
TJ
3387static ssize_t ipr_store_adapter_state(struct device *dev,
3388 struct device_attribute *attr,
f37eb54b 3389 const char *buf, size_t count)
3390{
ee959b00 3391 struct Scsi_Host *shost = class_to_shost(dev);
f37eb54b 3392 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3393 unsigned long lock_flags;
3394 int result = count;
3395
3396 if (!capable(CAP_SYS_ADMIN))
3397 return -EACCES;
3398
3399 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3400 if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
3401 ioa_cfg->ioa_is_dead = 0;
3402 ioa_cfg->reset_retries = 0;
3403 ioa_cfg->in_ioa_bringdown = 0;
3404 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3405 }
3406 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3407 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3408
3409 return result;
3410}
3411
ee959b00 3412static struct device_attribute ipr_ioa_state_attr = {
f37eb54b 3413 .attr = {
49dd0961 3414 .name = "online_state",
f37eb54b 3415 .mode = S_IRUGO | S_IWUSR,
3416 },
3417 .show = ipr_show_adapter_state,
3418 .store = ipr_store_adapter_state
3419};
3420
1da177e4
LT
3421/**
3422 * ipr_store_reset_adapter - Reset the adapter
ee959b00
TJ
3423 * @dev: device struct
3424 * @buf: buffer
3425 * @count: buffer size
1da177e4
LT
3426 *
3427 * This function will reset the adapter.
3428 *
3429 * Return value:
3430 * count on success / other on failure
3431 **/
ee959b00
TJ
3432static ssize_t ipr_store_reset_adapter(struct device *dev,
3433 struct device_attribute *attr,
1da177e4
LT
3434 const char *buf, size_t count)
3435{
ee959b00 3436 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3437 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3438 unsigned long lock_flags;
3439 int result = count;
3440
3441 if (!capable(CAP_SYS_ADMIN))
3442 return -EACCES;
3443
3444 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3445 if (!ioa_cfg->in_reset_reload)
3446 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3447 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3448 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3449
3450 return result;
3451}
3452
ee959b00 3453static struct device_attribute ipr_ioa_reset_attr = {
1da177e4
LT
3454 .attr = {
3455 .name = "reset_host",
3456 .mode = S_IWUSR,
3457 },
3458 .store = ipr_store_reset_adapter
3459};
3460
3461/**
3462 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3463 * @buf_len: buffer length
3464 *
3465 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3466 * list to use for microcode download
3467 *
3468 * Return value:
3469 * pointer to sglist / NULL on failure
3470 **/
3471static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3472{
3473 int sg_size, order, bsize_elem, num_elem, i, j;
3474 struct ipr_sglist *sglist;
3475 struct scatterlist *scatterlist;
3476 struct page *page;
3477
3478 /* Get the minimum size per scatter/gather element */
3479 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3480
3481 /* Get the actual size per element */
3482 order = get_order(sg_size);
3483
3484 /* Determine the actual number of bytes per element */
3485 bsize_elem = PAGE_SIZE * (1 << order);
3486
3487 /* Determine the actual number of sg entries needed */
3488 if (buf_len % bsize_elem)
3489 num_elem = (buf_len / bsize_elem) + 1;
3490 else
3491 num_elem = buf_len / bsize_elem;
3492
3493 /* Allocate a scatter/gather list for the DMA */
0bc42e35 3494 sglist = kzalloc(sizeof(struct ipr_sglist) +
1da177e4
LT
3495 (sizeof(struct scatterlist) * (num_elem - 1)),
3496 GFP_KERNEL);
3497
3498 if (sglist == NULL) {
3499 ipr_trace;
3500 return NULL;
3501 }
3502
1da177e4 3503 scatterlist = sglist->scatterlist;
45711f1a 3504 sg_init_table(scatterlist, num_elem);
1da177e4
LT
3505
3506 sglist->order = order;
3507 sglist->num_sg = num_elem;
3508
3509 /* Allocate a bunch of sg elements */
3510 for (i = 0; i < num_elem; i++) {
3511 page = alloc_pages(GFP_KERNEL, order);
3512 if (!page) {
3513 ipr_trace;
3514
3515 /* Free up what we already allocated */
3516 for (j = i - 1; j >= 0; j--)
45711f1a 3517 __free_pages(sg_page(&scatterlist[j]), order);
1da177e4
LT
3518 kfree(sglist);
3519 return NULL;
3520 }
3521
642f1490 3522 sg_set_page(&scatterlist[i], page, 0, 0);
1da177e4
LT
3523 }
3524
3525 return sglist;
3526}
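/*
 * Worked example (illustrative; assumes 4k pages and an IPR_MAX_SGLIST
 * of 64, as in contemporary ipr.h): for a 4 MB image,
 *
 *	sg_size    = 4194304 / 63 = 66576 bytes
 *	order      = get_order(66576) = 5  (needs 17 pages, rounded up
 *	                                    to the next power of two, 32)
 *	bsize_elem = 4096 << 5 = 131072 bytes per element
 *	num_elem   = 4194304 / 131072 = 32 elements
 *
 * so the whole download fits in 32 order-5 allocations, comfortably
 * under the scatter/gather descriptor limit.
 */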
3527
3528/**
3529 * ipr_free_ucode_buffer - Frees a microcode download buffer
3530 * @sglist: scatter/gather list pointer
3531 *
3532 * Free a DMA'able ucode download buffer previously allocated with
3533 * ipr_alloc_ucode_buffer
3534 *
3535 * Return value:
3536 * nothing
3537 **/
3538static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3539{
3540 int i;
3541
3542 for (i = 0; i < sglist->num_sg; i++)
45711f1a 3543 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
1da177e4
LT
3544
3545 kfree(sglist);
3546}
3547
3548/**
3549 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3550 * @sglist: scatter/gather list pointer
3551 * @buffer: buffer pointer
3552 * @len: buffer length
3553 *
3554 * Copy a microcode image from a user buffer into a buffer allocated by
3555 * ipr_alloc_ucode_buffer
3556 *
3557 * Return value:
3558 * 0 on success / other on failure
3559 **/
3560static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3561 u8 *buffer, u32 len)
3562{
3563 int bsize_elem, i, result = 0;
3564 struct scatterlist *scatterlist;
3565 void *kaddr;
3566
3567 /* Determine the actual number of bytes per element */
3568 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3569
3570 scatterlist = sglist->scatterlist;
3571
3572 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
45711f1a
JA
3573 struct page *page = sg_page(&scatterlist[i]);
3574
3575 kaddr = kmap(page);
1da177e4 3576 memcpy(kaddr, buffer, bsize_elem);
45711f1a 3577 kunmap(page);
1da177e4
LT
3578
3579 scatterlist[i].length = bsize_elem;
3580
3581 if (result != 0) {
3582 ipr_trace;
3583 return result;
3584 }
3585 }
3586
3587 if (len % bsize_elem) {
45711f1a
JA
3588 struct page *page = sg_page(&scatterlist[i]);
3589
3590 kaddr = kmap(page);
1da177e4 3591 memcpy(kaddr, buffer, len % bsize_elem);
45711f1a 3592 kunmap(page);
1da177e4
LT
3593
3594 scatterlist[i].length = len % bsize_elem;
3595 }
3596
3597 sglist->buffer_len = len;
3598 return result;
3599}
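/*
 * Worked example (illustrative sizes): with bsize_elem = 131072,
 * copying a 200000 byte image performs one full-element copy
 * (scatterlist[0], length 131072), and the remainder path then copies
 * the trailing 200000 % 131072 = 68928 bytes into scatterlist[1],
 * setting its length accordingly; buffer_len records the full 200000.
 */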
3600
a32c055f
WB
3601/**
3602 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3603 * @ipr_cmd: ipr command struct
3604 * @sglist: scatter/gather list
3605 *
3606 * Builds a microcode download IOA data list (IOADL).
3607 *
3608 **/
3609static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3610 struct ipr_sglist *sglist)
3611{
3612 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3613 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3614 struct scatterlist *scatterlist = sglist->scatterlist;
3615 int i;
3616
3617 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3618 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3619 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3620
3621 ioarcb->ioadl_len =
3622 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3623 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3624 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3625 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3626 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3627 }
3628
3629 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3630}
3631
1da177e4 3632/**
12baa420 3633 * ipr_build_ucode_ioadl - Build a microcode download IOADL
1da177e4
LT
3634 * @ipr_cmd: ipr command struct
3635 * @sglist: scatter/gather list
1da177e4 3636 *
12baa420 3637 * Builds a microcode download IOA data list (IOADL).
1da177e4 3638 *
1da177e4 3639 **/
12baa420 3640static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3641 struct ipr_sglist *sglist)
1da177e4 3642{
1da177e4 3643 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 3644 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
1da177e4
LT
3645 struct scatterlist *scatterlist = sglist->scatterlist;
3646 int i;
3647
12baa420 3648 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
1da177e4 3649 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
a32c055f
WB
3650 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3651
3652 ioarcb->ioadl_len =
1da177e4
LT
3653 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3654
3655 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3656 ioadl[i].flags_and_data_len =
3657 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3658 ioadl[i].address =
3659 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3660 }
3661
12baa420 3662 ioadl[i-1].flags_and_data_len |=
3663 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3664}
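/*
 * Descriptor sketch (illustrative lengths): for a three-element mapped
 * list, the 32-bit IOADL built above ends up as
 *
 *	ioadl[0].flags_and_data_len = WRITE | 0x00020000
 *	ioadl[1].flags_and_data_len = WRITE | 0x00020000
 *	ioadl[2].flags_and_data_len = WRITE | LAST | 0x00010000
 *
 * i.e. every descriptor is marked as a write and only the final one
 * carries IPR_IOADL_FLAGS_LAST; the 64-bit variant differs only in
 * keeping the flags, length, and 64-bit address in separate fields.
 */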
3665
3666/**
3667 * ipr_update_ioa_ucode - Update IOA's microcode
3668 * @ioa_cfg: ioa config struct
3669 * @sglist: scatter/gather list
3670 *
3671 * Initiate an adapter reset to update the IOA's microcode
3672 *
3673 * Return value:
3674 * 0 on success / -EIO on failure
3675 **/
3676static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3677 struct ipr_sglist *sglist)
3678{
3679 unsigned long lock_flags;
3680
3681 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
970ea294
BK
3682 while (ioa_cfg->in_reset_reload) {
3683 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3684 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3685 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3686 }
12baa420 3687
3688 if (ioa_cfg->ucode_sglist) {
3689 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3690 dev_err(&ioa_cfg->pdev->dev,
3691 "Microcode download already in progress\n");
3692 return -EIO;
1da177e4 3693 }
12baa420 3694
3695 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3696 sglist->num_sg, DMA_TO_DEVICE);
3697
3698 if (!sglist->num_dma_sg) {
3699 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3700 dev_err(&ioa_cfg->pdev->dev,
3701 "Failed to map microcode download buffer!\n");
1da177e4
LT
3702 return -EIO;
3703 }
3704
12baa420 3705 ioa_cfg->ucode_sglist = sglist;
3706 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3707 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3708 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3709
3710 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3711 ioa_cfg->ucode_sglist = NULL;
3712 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1da177e4
LT
3713 return 0;
3714}
3715
3716/**
3717 * ipr_store_update_fw - Update the firmware on the adapter
ee959b00
TJ
3718 * @dev: device struct
3719 * @buf: buffer
3720 * @count: buffer size
1da177e4
LT
3721 *
3722 * This function will update the firmware on the adapter.
3723 *
3724 * Return value:
3725 * count on success / other on failure
3726 **/
ee959b00
TJ
3727static ssize_t ipr_store_update_fw(struct device *dev,
3728 struct device_attribute *attr,
3729 const char *buf, size_t count)
1da177e4 3730{
ee959b00 3731 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3732 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3733 struct ipr_ucode_image_header *image_hdr;
3734 const struct firmware *fw_entry;
3735 struct ipr_sglist *sglist;
1da177e4
LT
3736 char fname[100];
3737 char *src;
3738 int len, result, dnld_size;
3739
3740 if (!capable(CAP_SYS_ADMIN))
3741 return -EACCES;
3742
3743 len = snprintf(fname, 99, "%s", buf);
3744 fname[len-1] = '\0';	/* chop the trailing newline from the sysfs write */
3745
3746 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3747 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3748 return -EIO;
3749 }
3750
3751 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3752
1da177e4
LT
3753 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3754 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3755 sglist = ipr_alloc_ucode_buffer(dnld_size);
3756
3757 if (!sglist) {
3758 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3759 release_firmware(fw_entry);
3760 return -ENOMEM;
3761 }
3762
3763 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3764
3765 if (result) {
3766 dev_err(&ioa_cfg->pdev->dev,
3767 "Microcode buffer copy to DMA buffer failed\n");
12baa420 3768 goto out;
1da177e4
LT
3769 }
3770
14ed9cc7
WB
3771 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
3772
12baa420 3773 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
1da177e4 3774
12baa420 3775 if (!result)
3776 result = count;
3777out:
1da177e4
LT
3778 ipr_free_ucode_buffer(sglist);
3779 release_firmware(fw_entry);
12baa420 3780 return result;
1da177e4
LT
3781}
3782
ee959b00 3783static struct device_attribute ipr_update_fw_attr = {
1da177e4
LT
3784 .attr = {
3785 .name = "update_fw",
3786 .mode = S_IWUSR,
3787 },
3788 .store = ipr_store_update_fw
3789};
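/*
 * Usage sketch (illustrative; the image name and host number are
 * hypothetical): the string written here is a firmware file name that
 * request_firmware() resolves through the usual firmware search path
 * (typically /lib/firmware):
 *
 *	int fd = open("/sys/class/scsi_host/host0/update_fw", O_WRONLY);
 *	write(fd, "ibm-ipr-ucode.bin\n", 18);	/* hypothetical name */
 *	close(fd);
 *
 * The trailing newline is chopped by the store routine before the
 * lookup.
 */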
3790
75576bb9
WB
3791/**
3792 * ipr_show_fw_type - Show the adapter's firmware type.
3793 * @dev: class device struct
3794 * @buf: buffer
3795 *
3796 * Return value:
3797 * number of bytes printed to buffer
3798 **/
3799static ssize_t ipr_show_fw_type(struct device *dev,
3800 struct device_attribute *attr, char *buf)
3801{
3802 struct Scsi_Host *shost = class_to_shost(dev);
3803 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3804 unsigned long lock_flags = 0;
3805 int len;
3806
3807 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3808 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
3809 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3810 return len;
3811}
3812
3813static struct device_attribute ipr_ioa_fw_type_attr = {
3814 .attr = {
3815 .name = "fw_type",
3816 .mode = S_IRUGO,
3817 },
3818 .show = ipr_show_fw_type
3819};
3820
ee959b00 3821static struct device_attribute *ipr_ioa_attrs[] = {
1da177e4
LT
3822 &ipr_fw_version_attr,
3823 &ipr_log_level_attr,
3824 &ipr_diagnostics_attr,
f37eb54b 3825 &ipr_ioa_state_attr,
1da177e4
LT
3826 &ipr_ioa_reset_attr,
3827 &ipr_update_fw_attr,
75576bb9 3828 &ipr_ioa_fw_type_attr,
1da177e4
LT
3829 NULL,
3830};
3831
3832#ifdef CONFIG_SCSI_IPR_DUMP
3833/**
3834 * ipr_read_dump - Dump the adapter
2c3c8bea 3835 * @filp: open sysfs file
1da177e4 3836 * @kobj: kobject struct
91a69029 3837 * @bin_attr: bin_attribute struct
1da177e4
LT
3838 * @buf: buffer
3839 * @off: offset
3840 * @count: buffer size
3841 *
3842 * Return value:
3843 * number of bytes printed to buffer
3844 **/
2c3c8bea 3845static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
91a69029
ZR
3846 struct bin_attribute *bin_attr,
3847 char *buf, loff_t off, size_t count)
1da177e4 3848{
ee959b00 3849 struct device *cdev = container_of(kobj, struct device, kobj);
1da177e4
LT
3850 struct Scsi_Host *shost = class_to_shost(cdev);
3851 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3852 struct ipr_dump *dump;
3853 unsigned long lock_flags = 0;
3854 char *src;
4d4dd706 3855 int len, sdt_end;
1da177e4
LT
3856 size_t rc = count;
3857
3858 if (!capable(CAP_SYS_ADMIN))
3859 return -EACCES;
3860
3861 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3862 dump = ioa_cfg->dump;
3863
3864 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3865 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3866 return 0;
3867 }
3868 kref_get(&dump->kref);
3869 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3870
3871 if (off > dump->driver_dump.hdr.len) {
3872 kref_put(&dump->kref, ipr_release_dump);
3873 return 0;
3874 }
3875
3876 if (off + count > dump->driver_dump.hdr.len) {
3877 count = dump->driver_dump.hdr.len - off;
3878 rc = count;
3879 }
3880
3881 if (count && off < sizeof(dump->driver_dump)) {
3882 if (off + count > sizeof(dump->driver_dump))
3883 len = sizeof(dump->driver_dump) - off;
3884 else
3885 len = count;
3886 src = (u8 *)&dump->driver_dump + off;
3887 memcpy(buf, src, len);
3888 buf += len;
3889 off += len;
3890 count -= len;
3891 }
3892
3893 off -= sizeof(dump->driver_dump);
3894
4d4dd706
KSS
3895 if (ioa_cfg->sis64)
3896 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
3897 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
3898 sizeof(struct ipr_sdt_entry));
3899 else
3900 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
3901 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
3902
3903 if (count && off < sdt_end) {
3904 if (off + count > sdt_end)
3905 len = sdt_end - off;
1da177e4
LT
3906 else
3907 len = count;
3908 src = (u8 *)&dump->ioa_dump + off;
3909 memcpy(buf, src, len);
3910 buf += len;
3911 off += len;
3912 count -= len;
3913 }
3914
4d4dd706 3915 off -= sdt_end;
1da177e4
LT
3916
3917 while (count) {
3918 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
3919 len = PAGE_ALIGN(off) - off;
3920 else
3921 len = count;
3922 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
3923 src += off & ~PAGE_MASK;
3924 memcpy(buf, src, len);
3925 buf += len;
3926 off += len;
3927 count -= len;
3928 }
3929
3930 kref_put(&dump->kref, ipr_release_dump);
3931 return rc;
3932}
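/*
 * Offset map sketch: a read at offset off is satisfied from up to
 * three consecutive regions of the dump:
 *
 *	[0, sizeof(driver_dump))   driver dump headers
 *	[0, sdt_end)               SDT entries (off rebased after region 1)
 *	[0, remainder of hdr.len)  IOA data, one page per ioa_data[] slot
 *
 * off is rebased to zero as each region is consumed, which is why the
 * final loop can index ioa_data[] directly with
 * (off & PAGE_MASK) >> PAGE_SHIFT and copies at most up to the next
 * page boundary per iteration.
 */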
3933
3934/**
3935 * ipr_alloc_dump - Prepare for adapter dump
3936 * @ioa_cfg: ioa config struct
3937 *
3938 * Return value:
3939 * 0 on success / other on failure
3940 **/
3941static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3942{
3943 struct ipr_dump *dump;
4d4dd706 3944 __be32 **ioa_data;
1da177e4
LT
3945 unsigned long lock_flags = 0;
3946
0bc42e35 3947 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
1da177e4
LT
3948
3949 if (!dump) {
3950 ipr_err("Dump memory allocation failed\n");
3951 return -ENOMEM;
3952 }
3953
4d4dd706
KSS
3954 if (ioa_cfg->sis64)
3955 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
3956 else
3957 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
3958
3959 if (!ioa_data) {
3960 ipr_err("Dump memory allocation failed\n");
3961 kfree(dump);
3962 return -ENOMEM;
3963 }
3964
3965 dump->ioa_dump.ioa_data = ioa_data;
3966
1da177e4
LT
3967 kref_init(&dump->kref);
3968 dump->ioa_cfg = ioa_cfg;
3969
3970 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3971
3972 if (INACTIVE != ioa_cfg->sdt_state) {
3973 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4d4dd706 3974 vfree(dump->ioa_dump.ioa_data);
1da177e4
LT
3975 kfree(dump);
3976 return 0;
3977 }
3978
3979 ioa_cfg->dump = dump;
3980 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3981 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3982 ioa_cfg->dump_taken = 1;
3983 schedule_work(&ioa_cfg->work_q);
3984 }
3985 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3986
1da177e4
LT
3987 return 0;
3988}
3989
3990/**
3991 * ipr_free_dump - Free adapter dump memory
3992 * @ioa_cfg: ioa config struct
3993 *
3994 * Return value:
3995 * 0 on success / other on failure
3996 **/
3997static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
3998{
3999 struct ipr_dump *dump;
4000 unsigned long lock_flags = 0;
4001
4002 ENTER;
4003
4004 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4005 dump = ioa_cfg->dump;
4006 if (!dump) {
4007 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4008 return 0;
4009 }
4010
4011 ioa_cfg->dump = NULL;
4012 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4013
4014 kref_put(&dump->kref, ipr_release_dump);
4015
4016 LEAVE;
4017 return 0;
4018}
4019
4020/**
4021 * ipr_write_dump - Setup dump state of adapter
2c3c8bea 4022 * @filp: open sysfs file
1da177e4 4023 * @kobj: kobject struct
91a69029 4024 * @bin_attr: bin_attribute struct
1da177e4
LT
4025 * @buf: buffer
4026 * @off: offset
4027 * @count: buffer size
4028 *
4029 * Return value:
4030 * count on success / other on failure
4031 **/
2c3c8bea 4032static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
91a69029
ZR
4033 struct bin_attribute *bin_attr,
4034 char *buf, loff_t off, size_t count)
1da177e4 4035{
ee959b00 4036 struct device *cdev = container_of(kobj, struct device, kobj);
1da177e4
LT
4037 struct Scsi_Host *shost = class_to_shost(cdev);
4038 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4039 int rc;
4040
4041 if (!capable(CAP_SYS_ADMIN))
4042 return -EACCES;
4043
4044 if (buf[0] == '1')
4045 rc = ipr_alloc_dump(ioa_cfg);
4046 else if (buf[0] == '0')
4047 rc = ipr_free_dump(ioa_cfg);
4048 else
4049 return -EINVAL;
4050
4051 if (rc)
4052 return rc;
4053 else
4054 return count;
4055}
4056
4057static struct bin_attribute ipr_dump_attr = {
4058 .attr = {
4059 .name = "dump",
4060 .mode = S_IRUSR | S_IWUSR,
4061 },
4062 .size = 0,
4063 .read = ipr_read_dump,
4064 .write = ipr_write_dump
4065};
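/*
 * Usage sketch (illustrative; host number assumed): writing '1' to the
 * dump attribute allocates dump memory and arms WAIT_FOR_DUMP, writing
 * '0' releases it, and reading the attribute streams the dump out once
 * sdt_state reaches DUMP_OBTAINED:
 *
 *	int fd = open("/sys/class/scsi_host/host0/dump", O_WRONLY);
 *	write(fd, "1", 1);	/* prepare for a dump */
 *	close(fd);
 */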
4066#else
4067static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4068#endif
4069
4070/**
4071 * ipr_change_queue_depth - Change the device's queue depth
4072 * @sdev: scsi device struct
4073 * @qdepth: depth to set
e881a172 4074 * @reason: calling context
1da177e4
LT
4075 *
4076 * Return value:
4077 * actual depth set
4078 **/
e881a172
MC
4079static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
4080 int reason)
1da177e4 4081{
35a39691
BK
4082 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4083 struct ipr_resource_entry *res;
4084 unsigned long lock_flags = 0;
4085
e881a172
MC
4086 if (reason != SCSI_QDEPTH_DEFAULT)
4087 return -EOPNOTSUPP;
4088
35a39691
BK
4089 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4090 res = (struct ipr_resource_entry *)sdev->hostdata;
4091
4092 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4093 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4094 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4095
1da177e4
LT
4096 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4097 return sdev->queue_depth;
4098}
4099
4100/**
4101 * ipr_change_queue_type - Change the device's queue type
4102 * @sdev: scsi device struct
4103 * @tag_type: type of tags to use
4104 *
4105 * Return value:
4106 * actual queue type set
4107 **/
4108static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4109{
4110 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4111 struct ipr_resource_entry *res;
4112 unsigned long lock_flags = 0;
4113
4114 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4115 res = (struct ipr_resource_entry *)sdev->hostdata;
4116
4117 if (res) {
4118 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
4119 /*
4120 * We don't bother quiescing the device here since the
4121 * adapter firmware does it for us.
4122 */
4123 scsi_set_tag_type(sdev, tag_type);
4124
4125 if (tag_type)
4126 scsi_activate_tcq(sdev, sdev->queue_depth);
4127 else
4128 scsi_deactivate_tcq(sdev, sdev->queue_depth);
4129 } else
4130 tag_type = 0;
4131 } else
4132 tag_type = 0;
4133
4134 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4135 return tag_type;
4136}
4137
4138/**
4139 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4140 * @dev: device struct
46d74563 4141 * @attr: device attribute structure
1da177e4
LT
4142 * @buf: buffer
4143 *
4144 * Return value:
4145 * number of bytes printed to buffer
4146 **/
10523b3b 4147static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
1da177e4
LT
4148{
4149 struct scsi_device *sdev = to_scsi_device(dev);
4150 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4151 struct ipr_resource_entry *res;
4152 unsigned long lock_flags = 0;
4153 ssize_t len = -ENXIO;
4154
4155 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4156 res = (struct ipr_resource_entry *)sdev->hostdata;
4157 if (res)
3e7ebdfa 4158 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
1da177e4
LT
4159 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4160 return len;
4161}
4162
4163static struct device_attribute ipr_adapter_handle_attr = {
4164 .attr = {
4165 .name = "adapter_handle",
4166 .mode = S_IRUSR,
4167 },
4168 .show = ipr_show_adapter_handle
4169};
4170
3e7ebdfa 4171/**
5adcbeb3
WB
4172 * ipr_show_resource_path - Show the resource path or the resource address for
4173 * this device.
3e7ebdfa 4174 * @dev: device struct
46d74563 4175 * @attr: device attribute structure
3e7ebdfa
WB
4176 * @buf: buffer
4177 *
4178 * Return value:
4179 * number of bytes printed to buffer
4180 **/
4181static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4182{
4183 struct scsi_device *sdev = to_scsi_device(dev);
4184 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4185 struct ipr_resource_entry *res;
4186 unsigned long lock_flags = 0;
4187 ssize_t len = -ENXIO;
4188 char buffer[IPR_MAX_RES_PATH_LENGTH];
4189
4190 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4191 res = (struct ipr_resource_entry *)sdev->hostdata;
5adcbeb3 4192 if (res && ioa_cfg->sis64)
3e7ebdfa 4193 len = snprintf(buf, PAGE_SIZE, "%s\n",
5adcbeb3
WB
4194 ipr_format_res_path(res->res_path, buffer,
4195 sizeof(buffer)));
4196 else if (res)
4197 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4198 res->bus, res->target, res->lun);
4199
3e7ebdfa
WB
4200 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4201 return len;
4202}
4203
4204static struct device_attribute ipr_resource_path_attr = {
4205 .attr = {
4206 .name = "resource_path",
75576bb9 4207 .mode = S_IRUGO,
3e7ebdfa
WB
4208 },
4209 .show = ipr_show_resource_path
4210};
4211
46d74563
WB
4212/**
4213 * ipr_show_device_id - Show the device_id for this device.
4214 * @dev: device struct
4215 * @attr: device attribute structure
4216 * @buf: buffer
4217 *
4218 * Return value:
4219 * number of bytes printed to buffer
4220 **/
4221static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4222{
4223 struct scsi_device *sdev = to_scsi_device(dev);
4224 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4225 struct ipr_resource_entry *res;
4226 unsigned long lock_flags = 0;
4227 ssize_t len = -ENXIO;
4228
4229 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4230 res = (struct ipr_resource_entry *)sdev->hostdata;
4231 if (res && ioa_cfg->sis64)
4232 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4233 else if (res)
4234 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4235
4236 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4237 return len;
4238}
4239
4240static struct device_attribute ipr_device_id_attr = {
4241 .attr = {
4242 .name = "device_id",
4243 .mode = S_IRUGO,
4244 },
4245 .show = ipr_show_device_id
4246};
4247
75576bb9
WB
4248/**
4249 * ipr_show_resource_type - Show the resource type for this device.
4250 * @dev: device struct
46d74563 4251 * @attr: device attribute structure
75576bb9
WB
4252 * @buf: buffer
4253 *
4254 * Return value:
4255 * number of bytes printed to buffer
4256 **/
4257static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4258{
4259 struct scsi_device *sdev = to_scsi_device(dev);
4260 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4261 struct ipr_resource_entry *res;
4262 unsigned long lock_flags = 0;
4263 ssize_t len = -ENXIO;
4264
4265 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4266 res = (struct ipr_resource_entry *)sdev->hostdata;
4267
4268 if (res)
4269 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4270
4271 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4272 return len;
4273}
4274
4275static struct device_attribute ipr_resource_type_attr = {
4276 .attr = {
4277 .name = "resource_type",
4278 .mode = S_IRUGO,
4279 },
4280 .show = ipr_show_resource_type
4281};
4282
1da177e4
LT
4283static struct device_attribute *ipr_dev_attrs[] = {
4284 &ipr_adapter_handle_attr,
3e7ebdfa 4285 &ipr_resource_path_attr,
46d74563 4286 &ipr_device_id_attr,
75576bb9 4287 &ipr_resource_type_attr,
1da177e4
LT
4288 NULL,
4289};
4290
4291/**
4292 * ipr_biosparam - Return the HSC mapping
4293 * @sdev: scsi device struct
4294 * @block_device: block device pointer
4295 * @capacity: capacity of the device
4296 * @parm: Array containing returned HSC values.
4297 *
4298 * This function generates the HSC parms that fdisk uses.
4299 * We want to make sure we return something that places partitions
4300 * on 4k boundaries for best performance with the IOA.
4301 *
4302 * Return value:
4303 * 0 on success
4304 **/
4305static int ipr_biosparam(struct scsi_device *sdev,
4306 struct block_device *block_device,
4307 sector_t capacity, int *parm)
4308{
4309 int heads, sectors;
4310 sector_t cylinders;
4311
4312 heads = 128;
4313 sectors = 32;
4314
4315 cylinders = capacity;
4316 sector_div(cylinders, (128 * 32));
4317
4318 /* return result */
4319 parm[0] = heads;
4320 parm[1] = sectors;
4321 parm[2] = cylinders;
4322
4323 return 0;
4324}
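/*
 * Worked example (illustrative capacity): with the fixed 128-head,
 * 32-sector geometry, each cylinder spans 128 * 32 = 4096 sectors,
 * i.e. 2 MB of 512-byte sectors, so a 143374000-sector disk reports
 *
 *	cylinders = 143374000 / 4096 = 35003
 *
 * and any partition that starts on a cylinder boundary is aligned to a
 * 4k multiple, which is what the IOA prefers.
 */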
4325
35a39691
BK
4326/**
4327 * ipr_find_starget - Find target based on bus/target.
4328 * @starget: scsi target struct
4329 *
4330 * Return value:
4331 * resource entry pointer if found / NULL if not found
4332 **/
4333static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4334{
4335 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4336 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4337 struct ipr_resource_entry *res;
4338
4339 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa 4340 if ((res->bus == starget->channel) &&
0ee1d714 4341 (res->target == starget->id)) {
35a39691
BK
4342 return res;
4343 }
4344 }
4345
4346 return NULL;
4347}
4348
4349static struct ata_port_info sata_port_info;
4350
4351/**
4352 * ipr_target_alloc - Prepare for commands to a SCSI target
4353 * @starget: scsi target struct
4354 *
4355 * If the device is a SATA device, this function allocates an
4356 * ATA port with libata, else it does nothing.
4357 *
4358 * Return value:
4359 * 0 on success / non-0 on failure
4360 **/
4361static int ipr_target_alloc(struct scsi_target *starget)
4362{
4363 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4364 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4365 struct ipr_sata_port *sata_port;
4366 struct ata_port *ap;
4367 struct ipr_resource_entry *res;
4368 unsigned long lock_flags;
4369
4370 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4371 res = ipr_find_starget(starget);
4372 starget->hostdata = NULL;
4373
4374 if (res && ipr_is_gata(res)) {
4375 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4376 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4377 if (!sata_port)
4378 return -ENOMEM;
4379
4380 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4381 if (ap) {
4382 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4383 sata_port->ioa_cfg = ioa_cfg;
4384 sata_port->ap = ap;
4385 sata_port->res = res;
4386
4387 res->sata_port = sata_port;
4388 ap->private_data = sata_port;
4389 starget->hostdata = sata_port;
4390 } else {
4391 kfree(sata_port);
4392 return -ENOMEM;
4393 }
4394 }
4395 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4396
4397 return 0;
4398}
4399
4400/**
4401 * ipr_target_destroy - Destroy a SCSI target
4402 * @starget: scsi target struct
4403 *
4404 * If the device was a SATA device, this function frees the libata
4405 * ATA port, else it does nothing.
4406 *
4407 **/
4408static void ipr_target_destroy(struct scsi_target *starget)
4409{
4410 struct ipr_sata_port *sata_port = starget->hostdata;
3e7ebdfa
WB
4411 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4412 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4413
4414 if (ioa_cfg->sis64) {
0ee1d714
BK
4415 if (!ipr_find_starget(starget)) {
4416 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4417 clear_bit(starget->id, ioa_cfg->array_ids);
4418 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4419 clear_bit(starget->id, ioa_cfg->vset_ids);
4420 else if (starget->channel == 0)
4421 clear_bit(starget->id, ioa_cfg->target_ids);
4422 }
3e7ebdfa 4423 }
35a39691
BK
4424
4425 if (sata_port) {
4426 starget->hostdata = NULL;
4427 ata_sas_port_destroy(sata_port->ap);
4428 kfree(sata_port);
4429 }
4430}
4431
4432/**
4433 * ipr_find_sdev - Find device based on bus/target/lun.
4434 * @sdev: scsi device struct
4435 *
4436 * Return value:
4437 * resource entry pointer if found / NULL if not found
4438 **/
4439static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4440{
4441 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4442 struct ipr_resource_entry *res;
4443
4444 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa
WB
4445 if ((res->bus == sdev->channel) &&
4446 (res->target == sdev->id) &&
4447 (res->lun == sdev->lun))
35a39691
BK
4448 return res;
4449 }
4450
4451 return NULL;
4452}
4453
1da177e4
LT
4454/**
4455 * ipr_slave_destroy - Unconfigure a SCSI device
4456 * @sdev: scsi device struct
4457 *
4458 * Return value:
4459 * nothing
4460 **/
4461static void ipr_slave_destroy(struct scsi_device *sdev)
4462{
4463 struct ipr_resource_entry *res;
4464 struct ipr_ioa_cfg *ioa_cfg;
4465 unsigned long lock_flags = 0;
4466
4467 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4468
4469 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4470 res = (struct ipr_resource_entry *) sdev->hostdata;
4471 if (res) {
35a39691 4472 if (res->sata_port)
3e4ec344 4473 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
1da177e4
LT
4474 sdev->hostdata = NULL;
4475 res->sdev = NULL;
35a39691 4476 res->sata_port = NULL;
1da177e4
LT
4477 }
4478 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4479}
4480
4481/**
4482 * ipr_slave_configure - Configure a SCSI device
4483 * @sdev: scsi device struct
4484 *
4485 * This function configures the specified scsi device.
4486 *
4487 * Return value:
4488 * 0 on success
4489 **/
4490static int ipr_slave_configure(struct scsi_device *sdev)
4491{
4492 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4493 struct ipr_resource_entry *res;
dd406ef8 4494 struct ata_port *ap = NULL;
1da177e4 4495 unsigned long lock_flags = 0;
3e7ebdfa 4496 char buffer[IPR_MAX_RES_PATH_LENGTH];
1da177e4
LT
4497
4498 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4499 res = sdev->hostdata;
4500 if (res) {
4501 if (ipr_is_af_dasd_device(res))
4502 sdev->type = TYPE_RAID;
0726ce26 4503 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
1da177e4 4504 sdev->scsi_level = 4;
0726ce26 4505 sdev->no_uld_attach = 1;
4506 }
1da177e4 4507 if (ipr_is_vset_device(res)) {
242f9dcb
JA
4508 blk_queue_rq_timeout(sdev->request_queue,
4509 IPR_VSET_RW_TIMEOUT);
086fa5ff 4510 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
1da177e4 4511 }
dd406ef8
BK
4512 if (ipr_is_gata(res) && res->sata_port)
4513 ap = res->sata_port->ap;
4514 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4515
4516 if (ap) {
35a39691 4517 scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
dd406ef8
BK
4518 ata_sas_slave_configure(sdev, ap);
4519 } else
35a39691 4520 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
3e7ebdfa
WB
4521 if (ioa_cfg->sis64)
4522 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
5adcbeb3
WB
4523 ipr_format_res_path(res->res_path, buffer,
4524 sizeof(buffer)));
dd406ef8 4525 return 0;
1da177e4
LT
4526 }
4527 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4528 return 0;
4529}
4530
35a39691
BK
4531/**
4532 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4533 * @sdev: scsi device struct
4534 *
4535 * This function initializes an ATA port so that future commands
4536 * sent through queuecommand will work.
4537 *
4538 * Return value:
4539 * 0 on success
4540 **/
4541static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4542{
4543 struct ipr_sata_port *sata_port = NULL;
4544 int rc = -ENXIO;
4545
4546 ENTER;
4547 if (sdev->sdev_target)
4548 sata_port = sdev->sdev_target->hostdata;
4549 if (sata_port)
4550 rc = ata_sas_port_init(sata_port->ap);
4551 if (rc)
4552 ipr_slave_destroy(sdev);
4553
4554 LEAVE;
4555 return rc;
4556}
4557
1da177e4
LT
4558/**
4559 * ipr_slave_alloc - Prepare for commands to a device.
4560 * @sdev: scsi device struct
4561 *
4562 * This function saves a pointer to the resource entry
4563 * in the scsi device struct if the device exists. We
4564 * can then use this pointer in ipr_queuecommand when
4565 * handling new commands.
4566 *
4567 * Return value:
692aebfc 4568 * 0 on success / -ENXIO if device does not exist
1da177e4
LT
4569 **/
4570static int ipr_slave_alloc(struct scsi_device *sdev)
4571{
4572 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4573 struct ipr_resource_entry *res;
4574 unsigned long lock_flags;
692aebfc 4575 int rc = -ENXIO;
1da177e4
LT
4576
4577 sdev->hostdata = NULL;
4578
4579 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4580
35a39691
BK
4581 res = ipr_find_sdev(sdev);
4582 if (res) {
4583 res->sdev = sdev;
4584 res->add_to_ml = 0;
4585 res->in_erp = 0;
4586 sdev->hostdata = res;
4587 if (!ipr_is_naca_model(res))
4588 res->needs_sync_complete = 1;
4589 rc = 0;
4590 if (ipr_is_gata(res)) {
4591 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4592 return ipr_ata_slave_alloc(sdev);
1da177e4
LT
4593 }
4594 }
4595
4596 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4597
692aebfc 4598 return rc;
1da177e4
LT
4599}
4600
4601/**
4602 * __ipr_eh_host_reset - Reset the host adapter
4603 * @scsi_cmd: scsi command struct
4604 *
4605 * Return value:
4606 * SUCCESS / FAILED
4607 **/
df0ae249 4608static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
1da177e4
LT
4609{
4610 struct ipr_ioa_cfg *ioa_cfg;
4611 int rc;
4612
4613 ENTER;
4614 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4615
a92fa25c
KSS
4616 if (!ioa_cfg->in_reset_reload) {
4617 dev_err(&ioa_cfg->pdev->dev,
4618 "Adapter being reset as a result of error recovery.\n");
1da177e4 4619
a92fa25c
KSS
4620 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4621 ioa_cfg->sdt_state = GET_DUMP;
4622 }
1da177e4
LT
4623
4624 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4625
4626 LEAVE;
4627 return rc;
4628}
4629
df0ae249
JG
4630static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
4631{
4632 int rc;
4633
4634 spin_lock_irq(cmd->device->host->host_lock);
4635 rc = __ipr_eh_host_reset(cmd);
4636 spin_unlock_irq(cmd->device->host->host_lock);
4637
4638 return rc;
4639}
4640
c6513096
BK
4641/**
4642 * ipr_device_reset - Reset the device
4643 * @ioa_cfg: ioa config struct
4644 * @res: resource entry struct
4645 *
4646 * This function issues a device reset to the affected device.
4647 * If the device is a SCSI device, a LUN reset will be sent
4648 * to the device first. If that does not work, a target reset
35a39691
BK
4649 * will be sent. If the device is a SATA device, a PHY reset will
4650 * be sent.
c6513096
BK
4651 *
4652 * Return value:
4653 * 0 on success / non-zero on failure
4654 **/
4655static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4656 struct ipr_resource_entry *res)
4657{
4658 struct ipr_cmnd *ipr_cmd;
4659 struct ipr_ioarcb *ioarcb;
4660 struct ipr_cmd_pkt *cmd_pkt;
35a39691 4661 struct ipr_ioarcb_ata_regs *regs;
c6513096
BK
4662 u32 ioasc;
4663
4664 ENTER;
4665 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4666 ioarcb = &ipr_cmd->ioarcb;
4667 cmd_pkt = &ioarcb->cmd_pkt;
a32c055f
WB
4668
4669 if (ipr_cmd->ioa_cfg->sis64) {
4670 regs = &ipr_cmd->i.ata_ioadl.regs;
4671 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4672 } else
4673 regs = &ioarcb->u.add_data.u.regs;
c6513096 4674
3e7ebdfa 4675 ioarcb->res_handle = res->res_handle;
c6513096
BK
4676 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4677 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
35a39691
BK
4678 if (ipr_is_gata(res)) {
4679 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
a32c055f 4680 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
35a39691
BK
4681 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4682 }
c6513096
BK
4683
4684 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
96d21f00 4685 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
c6513096 4686 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
96d21f00
WB
4687 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4688 if (ipr_cmd->ioa_cfg->sis64)
4689 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4690 sizeof(struct ipr_ioasa_gata));
4691 else
4692 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4693 sizeof(struct ipr_ioasa_gata));
4694 }
c6513096
BK
4695
4696 LEAVE;
4697 return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
4698}
4699
35a39691
BK
4700/**
4701 * ipr_sata_reset - Reset the SATA port
cc0680a5 4702 * @link: SATA link to reset
35a39691
BK
4703 * @classes: class of the attached device
4704 *
cc0680a5 4705 * This function issues a SATA phy reset to the affected ATA link.
35a39691
BK
4706 *
4707 * Return value:
4708 * 0 on success / non-zero on failure
4709 **/
cc0680a5 4710static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
120bda35 4711 unsigned long deadline)
35a39691 4712{
cc0680a5 4713 struct ipr_sata_port *sata_port = link->ap->private_data;
35a39691
BK
4714 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4715 struct ipr_resource_entry *res;
4716 unsigned long lock_flags = 0;
4717 int rc = -ENXIO;
4718
4719 ENTER;
4720 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
73d98ff0
BK
4721 while (ioa_cfg->in_reset_reload) {
4722 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4723 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4724 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4725 }
4726
35a39691
BK
4727 res = sata_port->res;
4728 if (res) {
4729 rc = ipr_device_reset(ioa_cfg, res);
3e7ebdfa 4730 *classes = res->ata_class;
35a39691
BK
4731 }
4732
4733 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4734 LEAVE;
4735 return rc;
4736}
4737
1da177e4
LT
4738/**
4739 * __ipr_eh_dev_reset - Reset the device
4740 * @scsi_cmd: scsi command struct
4741 *
4742 * This function issues a device reset to the affected device.
4743 * A LUN reset will be sent to the device first. If that does
4744 * not work, a target reset will be sent.
4745 *
4746 * Return value:
4747 * SUCCESS / FAILED
4748 **/
94d0e7b8 4749static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
1da177e4
LT
4750{
4751 struct ipr_cmnd *ipr_cmd;
4752 struct ipr_ioa_cfg *ioa_cfg;
4753 struct ipr_resource_entry *res;
35a39691
BK
4754 struct ata_port *ap;
4755 int rc = 0;
1da177e4
LT
4756
4757 ENTER;
4758 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4759 res = scsi_cmd->device->hostdata;
4760
eeb88307 4761 if (!res)
1da177e4
LT
4762 return FAILED;
4763
4764 /*
4765 * If we are currently going through reset/reload, return failed. This will force the
4766 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
4767 * reset to complete
4768 */
4769 if (ioa_cfg->in_reset_reload)
4770 return FAILED;
4771 if (ioa_cfg->ioa_is_dead)
4772 return FAILED;
4773
4774 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3e7ebdfa 4775 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
1da177e4
LT
4776 if (ipr_cmd->scsi_cmd)
4777 ipr_cmd->done = ipr_scsi_eh_done;
24d6f2b5
BK
4778 if (ipr_cmd->qc)
4779 ipr_cmd->done = ipr_sata_eh_done;
7402ecef
BK
4780 if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
4781 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
4782 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
4783 }
1da177e4
LT
4784 }
4785 }
4786
4787 res->resetting_device = 1;
fb3ed3cb 4788 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
35a39691
BK
4789
4790 if (ipr_is_gata(res) && res->sata_port) {
4791 ap = res->sata_port->ap;
4792 spin_unlock_irq(scsi_cmd->device->host->host_lock);
a1efdaba 4793 ata_std_error_handler(ap);
35a39691 4794 spin_lock_irq(scsi_cmd->device->host->host_lock);
5af23d26
BK
4795
4796 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3e7ebdfa 4797 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5af23d26
BK
4798 rc = -EIO;
4799 break;
4800 }
4801 }
35a39691
BK
4802 } else
4803 rc = ipr_device_reset(ioa_cfg, res);
1da177e4
LT
4804 res->resetting_device = 0;
4805
1da177e4 4806 LEAVE;
c6513096 4807 return (rc ? FAILED : SUCCESS);
1da177e4
LT
4808}
4809
94d0e7b8
JG
4810static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
4811{
4812 int rc;
4813
4814 spin_lock_irq(cmd->device->host->host_lock);
4815 rc = __ipr_eh_dev_reset(cmd);
4816 spin_unlock_irq(cmd->device->host->host_lock);
4817
4818 return rc;
4819}
4820
1da177e4
LT
4821/**
4822 * ipr_bus_reset_done - Op done function for bus reset.
4823 * @ipr_cmd: ipr command struct
4824 *
4825 * This function is the op done function for a bus reset
4826 *
4827 * Return value:
4828 * none
4829 **/
4830static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
4831{
4832 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4833 struct ipr_resource_entry *res;
4834
4835 ENTER;
3e7ebdfa
WB
4836 if (!ioa_cfg->sis64)
4837 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4838 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
4839 scsi_report_bus_reset(ioa_cfg->host, res->bus);
4840 break;
4841 }
1da177e4 4842 }
1da177e4
LT
4843
4844 /*
4845 * If abort has not completed, indicate the reset has, else call the
4846 * abort's done function to wake the sleeping eh thread
4847 */
4848 if (ipr_cmd->sibling->sibling)
4849 ipr_cmd->sibling->sibling = NULL;
4850 else
4851 ipr_cmd->sibling->done(ipr_cmd->sibling);
4852
4853 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4854 LEAVE;
4855}
4856
4857/**
4858 * ipr_abort_timeout - An abort task has timed out
4859 * @ipr_cmd: ipr command struct
4860 *
4861 * This function handles when an abort task times out. If this
4862 * happens we issue a bus reset since we have resources tied
4863 * up that must be freed before returning to the midlayer.
4864 *
4865 * Return value:
4866 * none
4867 **/
4868static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
4869{
4870 struct ipr_cmnd *reset_cmd;
4871 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4872 struct ipr_cmd_pkt *cmd_pkt;
4873 unsigned long lock_flags = 0;
4874
4875 ENTER;
4876 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4877 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
4878 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4879 return;
4880 }
4881
fb3ed3cb 4882 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
1da177e4
LT
4883 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4884 ipr_cmd->sibling = reset_cmd;
4885 reset_cmd->sibling = ipr_cmd;
4886 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
4887 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
4888 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4889 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4890 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
4891
4892 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4893 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4894 LEAVE;
4895}
4896
4897/**
4898 * ipr_cancel_op - Cancel specified op
4899 * @scsi_cmd: scsi command struct
4900 *
4901 * This function cancels specified op.
4902 *
4903 * Return value:
4904 * SUCCESS / FAILED
4905 **/
4906static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
4907{
4908 struct ipr_cmnd *ipr_cmd;
4909 struct ipr_ioa_cfg *ioa_cfg;
4910 struct ipr_resource_entry *res;
4911 struct ipr_cmd_pkt *cmd_pkt;
a92fa25c 4912 u32 ioasc, int_reg;
1da177e4
LT
4913 int op_found = 0;
4914
4915 ENTER;
4916 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4917 res = scsi_cmd->device->hostdata;
4918
8fa728a2
JG
4919 /* If we are currently going through reset/reload, return failed.
4920 * This will force the mid-layer to call ipr_eh_host_reset,
4921 * which will then go to sleep and wait for the reset to complete
4922 */
4923 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
4924 return FAILED;
a92fa25c
KSS
4925 if (!res)
4926 return FAILED;
4927
4928 /*
4929 * If we are aborting a timed out op, chances are that the timeout was caused
4930 * by a still not detected EEH error. In such cases, reading a register will
4931 * trigger the EEH recovery infrastructure.
4932 */
4933 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4934
4935 if (!ipr_is_gscsi(res))
1da177e4
LT
4936 return FAILED;
4937
4938 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4939 if (ipr_cmd->scsi_cmd == scsi_cmd) {
4940 ipr_cmd->done = ipr_scsi_eh_done;
4941 op_found = 1;
4942 break;
4943 }
4944 }
4945
4946 if (!op_found)
4947 return SUCCESS;
4948
4949 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3e7ebdfa 4950 ipr_cmd->ioarcb.res_handle = res->res_handle;
1da177e4
LT
4951 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4952 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4953 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4954 ipr_cmd->u.sdev = scsi_cmd->device;
4955
fb3ed3cb
BK
4956 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
4957 scsi_cmd->cmnd[0]);
1da177e4 4958 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
96d21f00 4959 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
4960
4961 /*
4962 * If the abort task timed out and we sent a bus reset, we will get
4963 * one of the following responses to the abort
4964 */
4965 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
4966 ioasc = 0;
4967 ipr_trace;
4968 }
4969
4970 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
ee0a90fa 4971 if (!ipr_is_naca_model(res))
4972 res->needs_sync_complete = 1;
1da177e4
LT
4973
4974 LEAVE;
4975 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
4976}
4977
4978/**
4979 * ipr_eh_abort - Abort a single op
4980 * @scsi_cmd: scsi command struct
4981 *
4982 * Return value:
4983 * SUCCESS / FAILED
4984 **/
4985static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4986{
8fa728a2
JG
4987 unsigned long flags;
4988 int rc;
1da177e4
LT
4989
4990 ENTER;
1da177e4 4991
8fa728a2
JG
4992 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4993 rc = ipr_cancel_op(scsi_cmd);
4994 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
1da177e4
LT
4995
4996 LEAVE;
8fa728a2 4997 return rc;
1da177e4
LT
4998}
4999
5000/**
5001 * ipr_handle_other_interrupt - Handle "other" interrupts
5002 * @ioa_cfg: ioa config struct
634651fa 5003 * @int_reg: interrupt register
1da177e4
LT
5004 *
5005 * Return value:
5006 * IRQ_NONE / IRQ_HANDLED
5007 **/
634651fa 5008static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
630ad831 5009 u32 int_reg)
1da177e4
LT
5010{
5011 irqreturn_t rc = IRQ_HANDLED;
7dacb64f
WB
5012 u32 int_mask_reg;
5013
5014 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5015 int_reg &= ~int_mask_reg;
5016
5017 /* If no interrupt occurred on the adapter, ignore it, unless this
5018 * is SIS64, in which case check for a stage change interrupt.
5019 */
5020 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5021 if (ioa_cfg->sis64) {
5022 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5023 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5024 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5025
5026 /* clear stage change */
5027 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5028 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5029 list_del(&ioa_cfg->reset_cmd->queue);
5030 del_timer(&ioa_cfg->reset_cmd->timer);
5031 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5032 return IRQ_HANDLED;
5033 }
5034 }
5035
5036 return IRQ_NONE;
5037 }
1da177e4
LT
5038
5039 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5040 /* Mask the interrupt */
5041 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5042
5043 /* Clear the interrupt */
5044 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
5045 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5046
5047 list_del(&ioa_cfg->reset_cmd->queue);
5048 del_timer(&ioa_cfg->reset_cmd->timer);
5049 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
7dacb64f 5050 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
7dd21308
BK
5051 if (ioa_cfg->clear_isr) {
5052 if (ipr_debug && printk_ratelimit())
5053 dev_err(&ioa_cfg->pdev->dev,
5054 "Spurious interrupt detected. 0x%08X\n", int_reg);
5055 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5056 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5057 return IRQ_NONE;
5058 }
5059 } else {
5060 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5061 ioa_cfg->ioa_unit_checked = 1;
5062 else
5063 dev_err(&ioa_cfg->pdev->dev,
5064 "Permanent IOA failure. 0x%08X\n", int_reg);
5065
5066 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5067 ioa_cfg->sdt_state = GET_DUMP;
5068
5069 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5070 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5071 }
5072
5073 return rc;
5074}
5075
5076/**
5077 * ipr_isr_eh - Interrupt service routine error handler
5078 * @ioa_cfg: ioa config struct
5079 * @msg: message to log
5080 *
5081 * Return value:
5082 * none
5083 **/
5084static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
5085{
5086 ioa_cfg->errors_logged++;
5087 dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
5088
5089 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5090 ioa_cfg->sdt_state = GET_DUMP;
5091
5092 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5093}
5094
5095/**
5096 * ipr_isr - Interrupt service routine
5097 * @irq: irq number
5098 * @devp: pointer to ioa config struct
5099 *
5100 * Return value:
5101 * IRQ_NONE / IRQ_HANDLED
5102 **/
5103 static irqreturn_t ipr_isr(int irq, void *devp)
5104{
5105 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
5106 unsigned long lock_flags = 0;
5107 u32 int_reg = 0;
5108 u32 ioasc;
5109 u16 cmd_index;
5110 int num_hrrq = 0;
5111 int irq_none = 0;
5112 struct ipr_cmnd *ipr_cmd;
5113 irqreturn_t rc = IRQ_NONE;
5114
5115 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5116
5117 /* If interrupts are disabled, ignore the interrupt */
5118 if (!ioa_cfg->allow_interrupts) {
5119 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5120 return IRQ_NONE;
5121 }
5122
5123 while (1) {
5124 ipr_cmd = NULL;
5125
5126 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5127 ioa_cfg->toggle_bit) {
5128
5129 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
5130 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5131
5132 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
5133 ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
5134 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5135 return IRQ_HANDLED;
5136 }
5137
5138 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5139
5140 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5141
5142 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5143
5144 list_del(&ipr_cmd->queue);
5145 del_timer(&ipr_cmd->timer);
5146 ipr_cmd->done(ipr_cmd);
5147
5148 rc = IRQ_HANDLED;
5149
5150 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
5151 ioa_cfg->hrrq_curr++;
5152 } else {
5153 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
5154 ioa_cfg->toggle_bit ^= 1u;
5155 }
5156 }
5157
5158 if (ipr_cmd && !ioa_cfg->clear_isr)
5159 break;
5160
5161 if (ipr_cmd != NULL) {
5162 /* Clear the PCI interrupt */
5163 num_hrrq = 0;
5164 do {
5165 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5166 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5167 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5168 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5169
5170 } else if (rc == IRQ_NONE && irq_none == 0) {
5171 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5172 irq_none++;
5173 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5174 int_reg & IPR_PCII_HRRQ_UPDATED) {
5175 ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
5176 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5177 return IRQ_HANDLED;
5178 } else
5179 break;
5180 }
5181
5182 if (unlikely(rc == IRQ_NONE))
5183 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5184
5185 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5186 return rc;
5187}
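/*
 * Illustrative sketch (annotation, not part of the driver source): how a
 * host request/response queue (HRRQ) entry is consumed in ipr_isr() above.
 * Each 32-bit entry carries a toggle bit plus a response handle; the handle
 * indexes ipr_cmnd_list, and the toggle bit flips each time the queue wraps
 * so the host can tell fresh entries from stale ones:
 *
 *	u32 entry = be32_to_cpu(*ioa_cfg->hrrq_curr);
 *
 *	if ((entry & IPR_HRRQ_TOGGLE_BIT) == ioa_cfg->toggle_bit) {
 *		u16 idx = (entry & IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
 *			  IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
 *		struct ipr_cmnd *cmd = ioa_cfg->ipr_cmnd_list[idx];
 *		... complete cmd, then advance hrrq_curr, flipping
 *		    toggle_bit when wrapping from hrrq_end to hrrq_start ...
 *	}
 */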
5188
5189/**
5190 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5191 * @ioa_cfg: ioa config struct
5192 * @ipr_cmd: ipr command struct
5193 *
5194 * Return value:
5195 * 0 on success / -1 on failure
5196 **/
5197static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5198 struct ipr_cmnd *ipr_cmd)
5199{
5200 int i, nseg;
5201 struct scatterlist *sg;
5202 u32 length;
5203 u32 ioadl_flags = 0;
5204 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5205 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5206 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5207
5208 length = scsi_bufflen(scsi_cmd);
5209 if (!length)
5210 return 0;
5211
5212 nseg = scsi_dma_map(scsi_cmd);
5213 if (nseg < 0) {
5214 if (printk_ratelimit())
5215 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5216 return -1;
5217 }
5218
5219 ipr_cmd->dma_use_sg = nseg;
5220
5221 ioarcb->data_transfer_length = cpu_to_be32(length);
5222 ioarcb->ioadl_len =
5223 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5224
5225 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5226 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5227 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5228 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5229 ioadl_flags = IPR_IOADL_FLAGS_READ;
5230
5231 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5232 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5233 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5234 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5235 }
5236
5237 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5238 return 0;
5239}
5240
5241/**
5242 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5243 * @ioa_cfg: ioa config struct
5244 * @ipr_cmd: ipr command struct
5245 *
5246 * Return value:
5247 * 0 on success / -1 on failure
5248 **/
5249static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5250 struct ipr_cmnd *ipr_cmd)
5251{
5252 int i, nseg;
5253 struct scatterlist *sg;
5254 u32 length;
5255 u32 ioadl_flags = 0;
5256 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5257 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5258 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5259
5260 length = scsi_bufflen(scsi_cmd);
5261 if (!length)
1da177e4
LT
5262 return 0;
5263
5264 nseg = scsi_dma_map(scsi_cmd);
5265 if (nseg < 0) {
5266 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5267 return -1;
5268 }
5269
5270 ipr_cmd->dma_use_sg = nseg;
5271
5272 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5273 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5274 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5275 ioarcb->data_transfer_length = cpu_to_be32(length);
5276 ioarcb->ioadl_len =
5277 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5278 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5279 ioadl_flags = IPR_IOADL_FLAGS_READ;
5280 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5281 ioarcb->read_ioadl_len =
5282 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5283 }
5284
5285 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5286 ioadl = ioarcb->u.add_data.u.ioadl;
5287 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5288 offsetof(struct ipr_ioarcb, u.add_data));
5289 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5290 }
5291
5292 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5293 ioadl[i].flags_and_data_len =
5294 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5295 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5296 }
5297
5298 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5299 return 0;
5300}
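/*
 * Illustrative sketch (annotation, not in the original source): the shape of
 * the 32-bit IOADL built above. Each descriptor packs the flags and byte
 * count into one big-endian word plus a 32-bit DMA address, and the final
 * descriptor is tagged with IPR_IOADL_FLAGS_LAST so the IOA knows where the
 * chain ends. A two-segment read list would look roughly like:
 *
 *	ioadl[0].flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_READ | len0);
 *	ioadl[0].address            = cpu_to_be32(addr0);
 *	ioadl[1].flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_READ | len1 |
 *						  IPR_IOADL_FLAGS_LAST);
 *	ioadl[1].address            = cpu_to_be32(addr1);
 *
 * The 64-bit variant in ipr_build_ioadl64() carries flags, length, and a
 * 64-bit address in separate fields but follows the same LAST-flag rule.
 */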
5301
5302/**
5303 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5304 * @scsi_cmd: scsi command struct
5305 *
5306 * Return value:
5307 * task attributes
5308 **/
5309static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5310{
5311 u8 tag[2];
5312 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5313
5314 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5315 switch (tag[0]) {
5316 case MSG_SIMPLE_TAG:
5317 rc = IPR_FLAGS_LO_SIMPLE_TASK;
5318 break;
5319 case MSG_HEAD_TAG:
5320 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5321 break;
5322 case MSG_ORDERED_TAG:
5323 rc = IPR_FLAGS_LO_ORDERED_TASK;
5324 break;
5325 };
5326 }
5327
5328 return rc;
5329}
5330
5331/**
5332 * ipr_erp_done - Process completion of ERP for a device
5333 * @ipr_cmd: ipr command struct
5334 *
5335 * This function copies the sense buffer into the scsi_cmd
5336 * struct and pushes the scsi_done function.
5337 *
5338 * Return value:
5339 * nothing
5340 **/
5341static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5342{
5343 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5344 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5345 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5346 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5347
5348 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5349 scsi_cmd->result |= (DID_ERROR << 16);
5350 scmd_printk(KERN_ERR, scsi_cmd,
5351 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5352 } else {
5353 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5354 SCSI_SENSE_BUFFERSIZE);
5355 }
5356
5357 if (res) {
5358 if (!ipr_is_naca_model(res))
5359 res->needs_sync_complete = 1;
5360 res->in_erp = 0;
5361 }
5362 scsi_dma_unmap(ipr_cmd->scsi_cmd);
5363 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5364 scsi_cmd->scsi_done(scsi_cmd);
5365}
5366
5367/**
5368 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5369 * @ipr_cmd: ipr command struct
5370 *
5371 * Return value:
5372 * none
5373 **/
5374static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5375{
5376 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5377 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5378 dma_addr_t dma_addr = ipr_cmd->dma_addr;
5379
5380 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5381 ioarcb->data_transfer_length = 0;
5382 ioarcb->read_data_transfer_length = 0;
5383 ioarcb->ioadl_len = 0;
5384 ioarcb->read_ioadl_len = 0;
5385 ioasa->hdr.ioasc = 0;
5386 ioasa->hdr.residual_data_len = 0;
5387
5388 if (ipr_cmd->ioa_cfg->sis64)
5389 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5390 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5391 else {
5392 ioarcb->write_ioadl_addr =
5393 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5394 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5395 }
5396}
5397
5398/**
5399 * ipr_erp_request_sense - Send request sense to a device
5400 * @ipr_cmd: ipr command struct
5401 *
5402 * This function sends a request sense to a device as a result
5403 * of a check condition.
5404 *
5405 * Return value:
5406 * nothing
5407 **/
5408static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5409{
5410 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5411 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5412
5413 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5414 ipr_erp_done(ipr_cmd);
5415 return;
5416 }
5417
5418 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5419
5420 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5421 cmd_pkt->cdb[0] = REQUEST_SENSE;
5422 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5423 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5424 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5425 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5426
5427 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5428 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5429
5430 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5431 IPR_REQUEST_SENSE_TIMEOUT * 2);
5432}
5433
5434/**
5435 * ipr_erp_cancel_all - Send cancel all to a device
5436 * @ipr_cmd: ipr command struct
5437 *
5438 * This function sends a cancel all to a device to clear the
5439 * queue. If we are running TCQ on the device, QERR is set to 1,
5440 * which means all outstanding ops have been dropped on the floor.
5441 * Cancel all will return them to us.
5442 *
5443 * Return value:
5444 * nothing
5445 **/
5446static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5447{
5448 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5449 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5450 struct ipr_cmd_pkt *cmd_pkt;
5451
5452 res->in_erp = 1;
5453
5454 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5455
5456 if (!scsi_get_tag_type(scsi_cmd->device)) {
5457 ipr_erp_request_sense(ipr_cmd);
5458 return;
5459 }
5460
5461 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5462 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5463 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5464
5465 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5466 IPR_CANCEL_ALL_TIMEOUT);
5467}
5468
5469/**
5470 * ipr_dump_ioasa - Dump contents of IOASA
5471 * @ioa_cfg: ioa config struct
5472 * @ipr_cmd: ipr command struct
5473 * @res: resource entry struct
5474 *
5475 * This function is invoked by the interrupt handler when ops
5476 * fail. It will log the IOASA if appropriate. Only called
5477 * for GPDD ops.
5478 *
5479 * Return value:
5480 * none
5481 **/
5482static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5483 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5484{
5485 int i;
5486 u16 data_len;
5487 u32 ioasc, fd_ioasc;
5488 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5489 __be32 *ioasa_data = (__be32 *)ioasa;
5490 int error_index;
5491
5492 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5493 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5494
5495 if (0 == ioasc)
5496 return;
5497
5498 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5499 return;
5500
5501 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5502 error_index = ipr_get_error(fd_ioasc);
5503 else
5504 error_index = ipr_get_error(ioasc);
5505
5506 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5507 /* Don't log an error if the IOA already logged one */
5508 if (ioasa->hdr.ilid != 0)
5509 return;
5510
5511 if (!ipr_is_gscsi(res))
5512 return;
5513
5514 if (ipr_error_table[error_index].log_ioasa == 0)
5515 return;
5516 }
5517
5518 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5519
5520 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5521 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5522 data_len = sizeof(struct ipr_ioasa64);
5523 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
5524 data_len = sizeof(struct ipr_ioasa);
5525
5526 ipr_err("IOASA Dump:\n");
5527
5528 for (i = 0; i < data_len / 4; i += 4) {
5529 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5530 be32_to_cpu(ioasa_data[i]),
5531 be32_to_cpu(ioasa_data[i+1]),
5532 be32_to_cpu(ioasa_data[i+2]),
5533 be32_to_cpu(ioasa_data[i+3]));
5534 }
5535}
5536
5537/**
5538 * ipr_gen_sense - Generate SCSI sense data from an IOASA
5539 * @ipr_cmd: ipr command struct
5541 *
5542 * Return value:
5543 * none
5544 **/
5545static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5546{
5547 u32 failing_lba;
5548 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5549 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5550 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5551 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
5552
5553 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5554
5555 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5556 return;
5557
5558 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5559
5560 if (ipr_is_vset_device(res) &&
5561 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5562 ioasa->u.vset.failing_lba_hi != 0) {
5563 sense_buf[0] = 0x72;
5564 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5565 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5566 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5567
5568 sense_buf[7] = 12;
5569 sense_buf[8] = 0;
5570 sense_buf[9] = 0x0A;
5571 sense_buf[10] = 0x80;
5572
5573 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5574
5575 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5576 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5577 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5578 sense_buf[15] = failing_lba & 0x000000ff;
5579
5580 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5581
5582 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5583 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5584 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5585 sense_buf[19] = failing_lba & 0x000000ff;
5586 } else {
5587 sense_buf[0] = 0x70;
5588 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5589 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5590 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5591
5592 /* Illegal request */
5593 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
5594 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
5595 sense_buf[7] = 10; /* additional length */
5596
5597 /* IOARCB was in error */
5598 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5599 sense_buf[15] = 0xC0;
5600 else /* Parameter data was invalid */
5601 sense_buf[15] = 0x80;
5602
5603 sense_buf[16] =
5604 ((IPR_FIELD_POINTER_MASK &
5605 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
5606 sense_buf[17] =
5607 (IPR_FIELD_POINTER_MASK &
5608 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
5609 } else {
5610 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5611 if (ipr_is_vset_device(res))
5612 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5613 else
5614 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5615
5616 sense_buf[0] |= 0x80; /* Or in the Valid bit */
5617 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5618 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5619 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5620 sense_buf[6] = failing_lba & 0x000000ff;
5621 }
5622
5623 sense_buf[7] = 6; /* additional length */
5624 }
5625 }
5626}
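/*
 * Annotation (not in the original source): the two sense layouts produced
 * above. For vset devices whose failing LBA needs more than 32 bits, a
 * descriptor-format buffer (response code 0x72) is built with a 12-byte
 * information descriptor (type 0x00, VALID bit 0x80) holding the LBA in
 * bytes 12-19, i.e. roughly:
 *
 *	u64 lba = ((u64)be32_to_cpu(ioasa->u.vset.failing_lba_hi) << 32) |
 *		  be32_to_cpu(ioasa->u.vset.failing_lba_lo);
 *
 * All other cases use fixed-format sense (response code 0x70), where the
 * VALID bit is OR'd into byte 0 and the failing LBA goes in the
 * information field, bytes 3-6.
 */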
5627
5628/**
5629 * ipr_get_autosense - Copy autosense data to sense buffer
5630 * @ipr_cmd: ipr command struct
5631 *
5632 * This function copies the autosense buffer to the buffer
5633 * in the scsi_cmd, if there is autosense available.
5634 *
5635 * Return value:
5636 * 1 if autosense was available / 0 if not
5637 **/
5638static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5639{
5640 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5641 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
5642
5643 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
5644 return 0;
5645
5646 if (ipr_cmd->ioa_cfg->sis64)
5647 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
5648 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
5649 SCSI_SENSE_BUFFERSIZE));
5650 else
5651 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5652 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5653 SCSI_SENSE_BUFFERSIZE));
5654 return 1;
5655}
5656
5657/**
5658 * ipr_erp_start - Process an error response for a SCSI op
5659 * @ioa_cfg: ioa config struct
5660 * @ipr_cmd: ipr command struct
5661 *
5662 * This function determines whether or not to initiate ERP
5663 * on the affected device.
5664 *
5665 * Return value:
5666 * nothing
5667 **/
5668static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5669 struct ipr_cmnd *ipr_cmd)
5670{
5671 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5672 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5673 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5674 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
5675
5676 if (!res) {
5677 ipr_scsi_eh_done(ipr_cmd);
5678 return;
5679 }
5680
5681 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
5682 ipr_gen_sense(ipr_cmd);
5683
5684 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5685
5686 switch (masked_ioasc) {
5687 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
5688 if (ipr_is_naca_model(res))
5689 scsi_cmd->result |= (DID_ABORT << 16);
5690 else
5691 scsi_cmd->result |= (DID_IMM_RETRY << 16);
5692 break;
5693 case IPR_IOASC_IR_RESOURCE_HANDLE:
5694 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
5695 scsi_cmd->result |= (DID_NO_CONNECT << 16);
5696 break;
5697 case IPR_IOASC_HW_SEL_TIMEOUT:
5698 scsi_cmd->result |= (DID_NO_CONNECT << 16);
5699 if (!ipr_is_naca_model(res))
5700 res->needs_sync_complete = 1;
5701 break;
5702 case IPR_IOASC_SYNC_REQUIRED:
5703 if (!res->in_erp)
5704 res->needs_sync_complete = 1;
5705 scsi_cmd->result |= (DID_IMM_RETRY << 16);
5706 break;
5707 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
5708 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
5709 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
5710 break;
5711 case IPR_IOASC_BUS_WAS_RESET:
5712 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
5713 /*
5714 * Report the bus reset and ask for a retry. The device
5715 * will give CC/UA the next command.
5716 */
5717 if (!res->resetting_device)
5718 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
5719 scsi_cmd->result |= (DID_ERROR << 16);
5720 if (!ipr_is_naca_model(res))
5721 res->needs_sync_complete = 1;
5722 break;
5723 case IPR_IOASC_HW_DEV_BUS_STATUS:
5724 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
5725 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
5726 if (!ipr_get_autosense(ipr_cmd)) {
5727 if (!ipr_is_naca_model(res)) {
5728 ipr_erp_cancel_all(ipr_cmd);
5729 return;
5730 }
5731 }
5732 }
5733 if (!ipr_is_naca_model(res))
5734 res->needs_sync_complete = 1;
5735 break;
5736 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
5737 break;
5738 default:
5739 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5740 scsi_cmd->result |= (DID_ERROR << 16);
5741 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
5742 res->needs_sync_complete = 1;
5743 break;
5744 }
5745
5746 scsi_dma_unmap(ipr_cmd->scsi_cmd);
5747 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5748 scsi_cmd->scsi_done(scsi_cmd);
5749}
5750
5751/**
5752 * ipr_scsi_done - mid-layer done function
5753 * @ipr_cmd: ipr command struct
5754 *
5755 * This function is invoked by the interrupt handler for
5756 * ops generated by the SCSI mid-layer
5757 *
5758 * Return value:
5759 * none
5760 **/
5761static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
5762{
5763 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5764 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5765 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5766
5767 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
5768
5769 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
5770 scsi_dma_unmap(ipr_cmd->scsi_cmd);
5771 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5772 scsi_cmd->scsi_done(scsi_cmd);
5773 } else
5774 ipr_erp_start(ioa_cfg, ipr_cmd);
5775}
5776
5777/**
5778 * ipr_queuecommand - Queue a mid-layer request
5779 * @scsi_cmd: scsi command struct
5780 * @done: done function
5781 *
5782 * This function queues a request generated by the mid-layer.
5783 *
5784 * Return value:
5785 * 0 on success
5786 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
5787 * SCSI_MLQUEUE_HOST_BUSY if host is busy
5788 **/
5789 static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
5790 void (*done) (struct scsi_cmnd *))
5791{
5792 struct ipr_ioa_cfg *ioa_cfg;
5793 struct ipr_resource_entry *res;
5794 struct ipr_ioarcb *ioarcb;
5795 struct ipr_cmnd *ipr_cmd;
5796 int rc = 0;
5797
5798 scsi_cmd->scsi_done = done;
5799 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5800 res = scsi_cmd->device->hostdata;
5801 scsi_cmd->result = (DID_OK << 16);
5802
5803 /*
5804 * We are currently blocking all devices due to a host reset
5805 * We have told the host to stop giving us new requests, but
5806 * ERP ops don't count. FIXME
5807 */
5808 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
5809 return SCSI_MLQUEUE_HOST_BUSY;
5810
5811 /*
5812 * FIXME - Create scsi_set_host_offline interface
5813 * and the ioa_is_dead check can be removed
5814 */
5815 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
5816 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
5817 scsi_cmd->result = (DID_NO_CONNECT << 16);
5818 scsi_cmd->scsi_done(scsi_cmd);
5819 return 0;
5820 }
5821
5822 if (ipr_is_gata(res) && res->sata_port)
5823 return ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
5824
5825 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5826 ioarcb = &ipr_cmd->ioarcb;
5827 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5828
5829 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
5830 ipr_cmd->scsi_cmd = scsi_cmd;
5831 ioarcb->res_handle = res->res_handle;
5832 ipr_cmd->done = ipr_scsi_done;
5833 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
5834
5835 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
5836 if (scsi_cmd->underflow == 0)
5837 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5838
5839 if (res->needs_sync_complete) {
5840 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
5841 res->needs_sync_complete = 0;
5842 }
5843
5844 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5845 if (ipr_is_gscsi(res))
5846 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
5847 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
5848 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
5849 }
5850
5851 if (scsi_cmd->cmnd[0] >= 0xC0 &&
5852 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
5853 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5854
5855 if (likely(rc == 0)) {
5856 if (ioa_cfg->sis64)
5857 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
5858 else
5859 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
5860 }
5861
5862 if (unlikely(rc != 0)) {
5863 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5864 return SCSI_MLQUEUE_HOST_BUSY;
5865 }
5866
5867 ipr_send_command(ipr_cmd);
5868 return 0;
5869}
5870
5871static DEF_SCSI_QCMD(ipr_queuecommand)
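/*
 * Annotation (not in the original source): DEF_SCSI_QCMD() generates the
 * locked queuecommand entry point from the _lck variant above. Its
 * expansion is roughly the following wrapper (sketch; see
 * include/scsi/scsi_host.h of this kernel era for the exact definition):
 *
 *	static int ipr_queuecommand(struct Scsi_Host *shost,
 *				    struct scsi_cmnd *cmd)
 *	{
 *		unsigned long irq_flags;
 *		int rc;
 *
 *		spin_lock_irqsave(shost->host_lock, irq_flags);
 *		rc = ipr_queuecommand_lck(cmd, cmd->scsi_done);
 *		spin_unlock_irqrestore(shost->host_lock, irq_flags);
 *		return rc;
 *	}
 */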
5872
5873/**
5874 * ipr_ioctl - IOCTL handler
5875 * @sdev: scsi device struct
5876 * @cmd: IOCTL cmd
5877 * @arg: IOCTL arg
5878 *
5879 * Return value:
5880 * 0 on success / other on failure
5881 **/
5882 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5883{
5884 struct ipr_resource_entry *res;
5885
5886 res = (struct ipr_resource_entry *)sdev->hostdata;
5887 if (res && ipr_is_gata(res)) {
5888 if (cmd == HDIO_GET_IDENTITY)
5889 return -ENOTTY;
5890 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
5891 }
5892
5893 return -EINVAL;
5894}
5895
5896/**
5897 * ipr_ioa_info - Get information about the card/driver
5898 * @host: scsi host struct
5899 *
5900 * Return value:
5901 * pointer to buffer with description string
5902 **/
5903static const char * ipr_ioa_info(struct Scsi_Host *host)
5904{
5905 static char buffer[512];
5906 struct ipr_ioa_cfg *ioa_cfg;
5907 unsigned long lock_flags = 0;
5908
5909 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
5910
5911 spin_lock_irqsave(host->host_lock, lock_flags);
5912 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
5913 spin_unlock_irqrestore(host->host_lock, lock_flags);
5914
5915 return buffer;
5916}
5917
5918static struct scsi_host_template driver_template = {
5919 .module = THIS_MODULE,
5920 .name = "IPR",
5921 .info = ipr_ioa_info,
5922 .ioctl = ipr_ioctl,
5923 .queuecommand = ipr_queuecommand,
5924 .eh_abort_handler = ipr_eh_abort,
5925 .eh_device_reset_handler = ipr_eh_dev_reset,
5926 .eh_host_reset_handler = ipr_eh_host_reset,
5927 .slave_alloc = ipr_slave_alloc,
5928 .slave_configure = ipr_slave_configure,
5929 .slave_destroy = ipr_slave_destroy,
5930 .target_alloc = ipr_target_alloc,
5931 .target_destroy = ipr_target_destroy,
5932 .change_queue_depth = ipr_change_queue_depth,
5933 .change_queue_type = ipr_change_queue_type,
5934 .bios_param = ipr_biosparam,
5935 .can_queue = IPR_MAX_COMMANDS,
5936 .this_id = -1,
5937 .sg_tablesize = IPR_MAX_SGLIST,
5938 .max_sectors = IPR_IOA_MAX_SECTORS,
5939 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
5940 .use_clustering = ENABLE_CLUSTERING,
5941 .shost_attrs = ipr_ioa_attrs,
5942 .sdev_attrs = ipr_dev_attrs,
5943 .proc_name = IPR_NAME
5944};
5945
5946/**
5947 * ipr_ata_phy_reset - libata phy_reset handler
5948 * @ap: ata port to reset
5949 *
5950 **/
5951static void ipr_ata_phy_reset(struct ata_port *ap)
5952{
5953 unsigned long flags;
5954 struct ipr_sata_port *sata_port = ap->private_data;
5955 struct ipr_resource_entry *res = sata_port->res;
5956 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5957 int rc;
5958
5959 ENTER;
5960 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5961 while(ioa_cfg->in_reset_reload) {
5962 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5963 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5964 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5965 }
5966
5967 if (!ioa_cfg->allow_cmds)
5968 goto out_unlock;
5969
5970 rc = ipr_device_reset(ioa_cfg, res);
5971
5972 if (rc) {
5973 ap->link.device[0].class = ATA_DEV_NONE;
5974 goto out_unlock;
5975 }
5976
5977 ap->link.device[0].class = res->ata_class;
5978 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
5979 ap->link.device[0].class = ATA_DEV_NONE;
5980
5981out_unlock:
5982 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5983 LEAVE;
5984}
5985
5986/**
5987 * ipr_ata_post_internal - Cleanup after an internal command
5988 * @qc: ATA queued command
5989 *
5990 * Return value:
5991 * none
5992 **/
5993static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
5994{
5995 struct ipr_sata_port *sata_port = qc->ap->private_data;
5996 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5997 struct ipr_cmnd *ipr_cmd;
5998 unsigned long flags;
5999
6000 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6001 while(ioa_cfg->in_reset_reload) {
6002 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6003 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6004 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6005 }
6006
6007 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
6008 if (ipr_cmd->qc == qc) {
6009 ipr_device_reset(ioa_cfg, sata_port->res);
6010 break;
6011 }
6012 }
6013 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6014}
6015
6016/**
6017 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6018 * @regs: destination
6019 * @tf: source ATA taskfile
6020 *
6021 * Return value:
6022 * none
6023 **/
6024static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6025 struct ata_taskfile *tf)
6026{
6027 regs->feature = tf->feature;
6028 regs->nsect = tf->nsect;
6029 regs->lbal = tf->lbal;
6030 regs->lbam = tf->lbam;
6031 regs->lbah = tf->lbah;
6032 regs->device = tf->device;
6033 regs->command = tf->command;
6034 regs->hob_feature = tf->hob_feature;
6035 regs->hob_nsect = tf->hob_nsect;
6036 regs->hob_lbal = tf->hob_lbal;
6037 regs->hob_lbam = tf->hob_lbam;
6038 regs->hob_lbah = tf->hob_lbah;
6039 regs->ctl = tf->ctl;
6040}
6041
6042/**
6043 * ipr_sata_done - done function for SATA commands
6044 * @ipr_cmd: ipr command struct
6045 *
6046 * This function is invoked by the interrupt handler for
6047 * ops generated by the SCSI mid-layer to SATA devices
6048 *
6049 * Return value:
6050 * none
6051 **/
6052static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6053{
6054 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6055 struct ata_queued_cmd *qc = ipr_cmd->qc;
6056 struct ipr_sata_port *sata_port = qc->ap->private_data;
6057 struct ipr_resource_entry *res = sata_port->res;
6058 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6059
6060 if (ipr_cmd->ioa_cfg->sis64)
6061 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6062 sizeof(struct ipr_ioasa_gata));
6063 else
6064 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6065 sizeof(struct ipr_ioasa_gata));
6066 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6067
6068 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6069 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6070
6071 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6072 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6073 else
6074 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6075 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6076 ata_qc_complete(qc);
6077}
6078
6079/**
6080 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6081 * @ipr_cmd: ipr command struct
6082 * @qc: ATA queued command
6083 *
6084 **/
6085static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6086 struct ata_queued_cmd *qc)
6087{
6088 u32 ioadl_flags = 0;
6089 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6090 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
6091 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6092 int len = qc->nbytes;
6093 struct scatterlist *sg;
6094 unsigned int si;
6095 dma_addr_t dma_addr = ipr_cmd->dma_addr;
6096
6097 if (len == 0)
6098 return;
6099
6100 if (qc->dma_dir == DMA_TO_DEVICE) {
6101 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6102 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6103 } else if (qc->dma_dir == DMA_FROM_DEVICE)
6104 ioadl_flags = IPR_IOADL_FLAGS_READ;
6105
6106 ioarcb->data_transfer_length = cpu_to_be32(len);
6107 ioarcb->ioadl_len =
6108 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6109 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6110 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
6111
6112 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6113 ioadl64->flags = cpu_to_be32(ioadl_flags);
6114 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6115 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6116
6117 last_ioadl64 = ioadl64;
6118 ioadl64++;
6119 }
6120
6121 if (likely(last_ioadl64))
6122 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6123}
6124
6125/**
6126 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6127 * @ipr_cmd: ipr command struct
6128 * @qc: ATA queued command
6129 *
6130 **/
6131static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6132 struct ata_queued_cmd *qc)
6133{
6134 u32 ioadl_flags = 0;
6135 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6136 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6137 struct ipr_ioadl_desc *last_ioadl = NULL;
6138 int len = qc->nbytes;
6139 struct scatterlist *sg;
6140 unsigned int si;
6141
6142 if (len == 0)
6143 return;
6144
6145 if (qc->dma_dir == DMA_TO_DEVICE) {
6146 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6147 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6148 ioarcb->data_transfer_length = cpu_to_be32(len);
6149 ioarcb->ioadl_len =
6150 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6151 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6152 ioadl_flags = IPR_IOADL_FLAGS_READ;
6153 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6154 ioarcb->read_ioadl_len =
6155 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6156 }
6157
6158 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6159 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6160 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6161
6162 last_ioadl = ioadl;
6163 ioadl++;
6164 }
6165
6166 if (likely(last_ioadl))
6167 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6168}
6169
6170/**
6171 * ipr_qc_issue - Issue a SATA qc to a device
6172 * @qc: queued command
6173 *
6174 * Return value:
6175 * 0 if success
6176 **/
6177static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6178{
6179 struct ata_port *ap = qc->ap;
6180 struct ipr_sata_port *sata_port = ap->private_data;
6181 struct ipr_resource_entry *res = sata_port->res;
6182 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6183 struct ipr_cmnd *ipr_cmd;
6184 struct ipr_ioarcb *ioarcb;
6185 struct ipr_ioarcb_ata_regs *regs;
6186
6187 if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
6188 return AC_ERR_SYSTEM;
6189
6190 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
6191 ioarcb = &ipr_cmd->ioarcb;
6192
6193 if (ioa_cfg->sis64) {
6194 regs = &ipr_cmd->i.ata_ioadl.regs;
6195 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6196 } else
6197 regs = &ioarcb->u.add_data.u.regs;
6198
6199 memset(regs, 0, sizeof(*regs));
6200 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6201
6202 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
6203 ipr_cmd->qc = qc;
6204 ipr_cmd->done = ipr_sata_done;
6205 ipr_cmd->ioarcb.res_handle = res->res_handle;
6206 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6207 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6208 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6209 ipr_cmd->dma_use_sg = qc->n_elem;
6210
6211 if (ioa_cfg->sis64)
6212 ipr_build_ata_ioadl64(ipr_cmd, qc);
6213 else
6214 ipr_build_ata_ioadl(ipr_cmd, qc);
6215
6216 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6217 ipr_copy_sata_tf(regs, &qc->tf);
6218 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6219 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6220
6221 switch (qc->tf.protocol) {
6222 case ATA_PROT_NODATA:
6223 case ATA_PROT_PIO:
6224 break;
6225
6226 case ATA_PROT_DMA:
6227 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6228 break;
6229
6230 case ATAPI_PROT_PIO:
6231 case ATAPI_PROT_NODATA:
6232 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6233 break;
6234
6235 case ATAPI_PROT_DMA:
6236 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6237 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6238 break;
6239
6240 default:
6241 WARN_ON(1);
6242 return AC_ERR_INVALID;
6243 }
6244
6245 ipr_send_command(ipr_cmd);
6246
35a39691
BK
6247 return 0;
6248}
6249
6250/**
6251 * ipr_qc_fill_rtf - Read result TF
6252 * @qc: ATA queued command
6253 *
6254 * Return value:
6255 * true
6256 **/
6257static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6258{
6259 struct ipr_sata_port *sata_port = qc->ap->private_data;
6260 struct ipr_ioasa_gata *g = &sata_port->ioasa;
6261 struct ata_taskfile *tf = &qc->result_tf;
6262
6263 tf->feature = g->error;
6264 tf->nsect = g->nsect;
6265 tf->lbal = g->lbal;
6266 tf->lbam = g->lbam;
6267 tf->lbah = g->lbah;
6268 tf->device = g->device;
6269 tf->command = g->status;
6270 tf->hob_nsect = g->hob_nsect;
6271 tf->hob_lbal = g->hob_lbal;
6272 tf->hob_lbam = g->hob_lbam;
6273 tf->hob_lbah = g->hob_lbah;
6274 tf->ctl = g->alt_status;
6275
6276 return true;
6277}
6278
6279 static struct ata_port_operations ipr_sata_ops = {
6280 .phy_reset = ipr_ata_phy_reset,
6281 .hardreset = ipr_sata_reset,
6282 .post_internal_cmd = ipr_ata_post_internal,
6283 .qc_prep = ata_noop_qc_prep,
6284 .qc_issue = ipr_qc_issue,
6285 .qc_fill_rtf = ipr_qc_fill_rtf,
6286 .port_start = ata_sas_port_start,
6287 .port_stop = ata_sas_port_stop
6288};
6289
6290static struct ata_port_info sata_port_info = {
6291 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
6292 .pio_mask = ATA_PIO4_ONLY,
6293 .mwdma_mask = ATA_MWDMA2,
6294 .udma_mask = ATA_UDMA6,
6295 .port_ops = &ipr_sata_ops
6296};
6297
6298#ifdef CONFIG_PPC_PSERIES
6299static const u16 ipr_blocked_processors[] = {
6300 PV_NORTHSTAR,
6301 PV_PULSAR,
6302 PV_POWER4,
6303 PV_ICESTAR,
6304 PV_SSTAR,
6305 PV_POWER4p,
6306 PV_630,
6307 PV_630p
6308};
6309
6310/**
6311 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6312 * @ioa_cfg: ioa cfg struct
6313 *
6314 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6315 * certain pSeries hardware. This function determines if the given
6316 * adapter is in one of these configurations or not.
6317 *
6318 * Return value:
6319 * 1 if adapter is not supported / 0 if adapter is supported
6320 **/
6321static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6322{
6323 int i;
6324
6325 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6326 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
6327 if (__is_processor(ipr_blocked_processors[i]))
6328 return 1;
6329 }
6330 }
6331 return 0;
6332}
6333#else
6334#define ipr_invalid_adapter(ioa_cfg) 0
6335#endif
6336
6337/**
6338 * ipr_ioa_bringdown_done - IOA bring down completion.
6339 * @ipr_cmd: ipr command struct
6340 *
6341 * This function processes the completion of an adapter bring down.
6342 * It wakes any reset sleepers.
6343 *
6344 * Return value:
6345 * IPR_RC_JOB_RETURN
6346 **/
6347static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6348{
6349 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6350
6351 ENTER;
6352 ioa_cfg->in_reset_reload = 0;
6353 ioa_cfg->reset_retries = 0;
6354 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6355 wake_up_all(&ioa_cfg->reset_wait_q);
6356
6357 spin_unlock_irq(ioa_cfg->host->host_lock);
6358 scsi_unblock_requests(ioa_cfg->host);
6359 spin_lock_irq(ioa_cfg->host->host_lock);
6360 LEAVE;
6361
6362 return IPR_RC_JOB_RETURN;
6363}
6364
6365/**
6366 * ipr_ioa_reset_done - IOA reset completion.
6367 * @ipr_cmd: ipr command struct
6368 *
6369 * This function processes the completion of an adapter reset.
6370 * It schedules any necessary mid-layer add/removes and
6371 * wakes any reset sleepers.
6372 *
6373 * Return value:
6374 * IPR_RC_JOB_RETURN
6375 **/
6376static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6377{
6378 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6379 struct ipr_resource_entry *res;
6380 struct ipr_hostrcb *hostrcb, *temp;
6381 int i = 0;
6382
6383 ENTER;
6384 ioa_cfg->in_reset_reload = 0;
6385 ioa_cfg->allow_cmds = 1;
6386 ioa_cfg->reset_cmd = NULL;
6387 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6388
6389 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6390 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6391 ipr_trace;
6392 break;
6393 }
6394 }
6395 schedule_work(&ioa_cfg->work_q);
6396
6397 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6398 list_del(&hostrcb->queue);
6399 if (i++ < IPR_NUM_LOG_HCAMS)
6400 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6401 else
6402 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6403 }
6404
6405 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6406 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6407
6408 ioa_cfg->reset_retries = 0;
6409 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6410 wake_up_all(&ioa_cfg->reset_wait_q);
6411
6412 spin_unlock(ioa_cfg->host->host_lock);
6413 scsi_unblock_requests(ioa_cfg->host);
6414 spin_lock(ioa_cfg->host->host_lock);
6415
6416 if (!ioa_cfg->allow_cmds)
6417 scsi_block_requests(ioa_cfg->host);
6418
6419 LEAVE;
6420 return IPR_RC_JOB_RETURN;
6421}
6422
6423/**
6424 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6425 * @supported_dev: supported device struct
6426 * @vpids: vendor product id struct
6427 *
6428 * Return value:
6429 * none
6430 **/
6431static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6432 struct ipr_std_inq_vpids *vpids)
6433{
6434 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6435 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6436 supported_dev->num_records = 1;
6437 supported_dev->data_length =
6438 cpu_to_be16(sizeof(struct ipr_supported_device));
6439 supported_dev->reserved = 0;
6440}
6441
6442/**
6443 * ipr_set_supported_devs - Send Set Supported Devices for a device
6444 * @ipr_cmd: ipr command struct
6445 *
6446 * This function sends a Set Supported Devices to the adapter
6447 *
6448 * Return value:
6449 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6450 **/
6451static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6452{
6453 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6454 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6455 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6456 struct ipr_resource_entry *res = ipr_cmd->u.res;
6457
6458 ipr_cmd->job_step = ipr_ioa_reset_done;
6459
6460 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6461 if (!ipr_is_scsi_disk(res))
6462 continue;
6463
6464 ipr_cmd->u.res = res;
6465 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6466
6467 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6468 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6469 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6470
6471 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6472 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
6473 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6474 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6475
6476 ipr_init_ioadl(ipr_cmd,
6477 ioa_cfg->vpd_cbs_dma +
6478 offsetof(struct ipr_misc_cbs, supp_dev),
6479 sizeof(struct ipr_supported_device),
6480 IPR_IOADL_FLAGS_WRITE_LAST);
6481
6482 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6483 IPR_SET_SUP_DEVICE_TIMEOUT);
6484
6485 if (!ioa_cfg->sis64)
6486 ipr_cmd->job_step = ipr_set_supported_devs;
6487 return IPR_RC_JOB_RETURN;
6488 }
6489
6490 return IPR_RC_JOB_CONTINUE;
6491}
6492
6493/**
6494 * ipr_get_mode_page - Locate specified mode page
6495 * @mode_pages: mode page buffer
6496 * @page_code: page code to find
6497 * @len: minimum required length for mode page
6498 *
6499 * Return value:
6500 * pointer to mode page / NULL on failure
6501 **/
6502static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6503 u32 page_code, u32 len)
6504{
6505 struct ipr_mode_page_hdr *mode_hdr;
6506 u32 page_length;
6507 u32 length;
6508
6509 if (!mode_pages || (mode_pages->hdr.length == 0))
6510 return NULL;
6511
6512 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6513 mode_hdr = (struct ipr_mode_page_hdr *)
6514 (mode_pages->data + mode_pages->hdr.block_desc_len);
6515
6516 while (length) {
6517 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6518 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6519 return mode_hdr;
6520 break;
6521 } else {
6522 page_length = (sizeof(struct ipr_mode_page_hdr) +
6523 mode_hdr->page_length);
6524 length -= page_length;
6525 mode_hdr = (struct ipr_mode_page_hdr *)
6526 ((unsigned long)mode_hdr + page_length);
6527 }
6528 }
6529 return NULL;
6530}
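/*
 * Illustrative sketch (annotation, not in the original source): the mode
 * parameter data layout ipr_get_mode_page() walks. The buffer starts with a
 * header whose length byte excludes itself, followed by optional block
 * descriptors, then a chain of pages, each led by a page code and a
 * page_length that counts only the bytes after the page header:
 *
 *	+---------------------+  mode_pages->hdr (length, block_desc_len, ...)
 *	| mode param header   |
 *	+---------------------+
 *	| block descriptors   |  hdr.block_desc_len bytes
 *	+---------------------+
 *	| page hdr: code, len |  IPR_GET_MODE_PAGE_CODE(mode_hdr)
 *	| page data           |  mode_hdr->page_length bytes
 *	+---------------------+
 *	| next page ...       |
 *	+---------------------+
 */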
6531
6532/**
6533 * ipr_check_term_power - Check for term power errors
6534 * @ioa_cfg: ioa config struct
6535 * @mode_pages: IOAFP mode pages buffer
6536 *
6537 * Check the IOAFP's mode page 28 for term power errors
6538 *
6539 * Return value:
6540 * nothing
6541 **/
6542static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6543 struct ipr_mode_pages *mode_pages)
6544{
6545 int i;
6546 int entry_length;
6547 struct ipr_dev_bus_entry *bus;
6548 struct ipr_mode_page28 *mode_page;
6549
6550 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6551 sizeof(struct ipr_mode_page28));
6552
6553 entry_length = mode_page->entry_length;
6554
6555 bus = mode_page->bus;
6556
6557 for (i = 0; i < mode_page->num_entries; i++) {
6558 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
6559 dev_err(&ioa_cfg->pdev->dev,
6560 "Term power is absent on scsi bus %d\n",
6561 bus->res_addr.bus);
6562 }
6563
6564 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
6565 }
6566}
6567
6568/**
6569 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
6570 * @ioa_cfg: ioa config struct
6571 *
6572 * Looks through the config table checking for SES devices. If
6573 * the SES device is in the SES table indicating a maximum SCSI
6574 * bus speed, the speed is limited for the bus.
6575 *
6576 * Return value:
6577 * none
6578 **/
6579static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
6580{
6581 u32 max_xfer_rate;
6582 int i;
6583
6584 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6585 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
6586 ioa_cfg->bus_attr[i].bus_width);
6587
6588 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
6589 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
6590 }
6591}
6592
6593/**
6594 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
6595 * @ioa_cfg: ioa config struct
6596 * @mode_pages: mode page 28 buffer
6597 *
6598 * Updates mode page 28 based on driver configuration
6599 *
6600 * Return value:
6601 * none
6602 **/
6603static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
6604 struct ipr_mode_pages *mode_pages)
6605{
6606 int i, entry_length;
6607 struct ipr_dev_bus_entry *bus;
6608 struct ipr_bus_attributes *bus_attr;
6609 struct ipr_mode_page28 *mode_page;
6610
6611 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6612 sizeof(struct ipr_mode_page28));
6613
6614 entry_length = mode_page->entry_length;
6615
6616 /* Loop for each device bus entry */
6617 for (i = 0, bus = mode_page->bus;
6618 i < mode_page->num_entries;
6619 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
6620 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
6621 dev_err(&ioa_cfg->pdev->dev,
6622 "Invalid resource address reported: 0x%08X\n",
6623 IPR_GET_PHYS_LOC(bus->res_addr));
6624 continue;
6625 }
6626
6627 bus_attr = &ioa_cfg->bus_attr[i];
6628 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
6629 bus->bus_width = bus_attr->bus_width;
6630 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
6631 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
6632 if (bus_attr->qas_enabled)
6633 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
6634 else
6635 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
6636 }
6637}
6638
6639/**
6640 * ipr_build_mode_select - Build a mode select command
6641 * @ipr_cmd: ipr command struct
6642 * @res_handle: resource handle to send command to
6643 * @parm: Byte 2 of Mode Sense command
6644 * @dma_addr: DMA buffer address
6645 * @xfer_len: data transfer length
6646 *
6647 * Return value:
6648 * none
6649 **/
6650static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
6651 __be32 res_handle, u8 parm,
6652 dma_addr_t dma_addr, u8 xfer_len)
6653 {
6654 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6655
6656 ioarcb->res_handle = res_handle;
6657 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6658 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6659 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
6660 ioarcb->cmd_pkt.cdb[1] = parm;
6661 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6662
6663 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
6664}
6665
6666/**
6667 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
6668 * @ipr_cmd: ipr command struct
6669 *
6670 * This function sets up the SCSI bus attributes and sends
6671 * a Mode Select for Page 28 to activate them.
6672 *
6673 * Return value:
6674 * IPR_RC_JOB_RETURN
6675 **/
6676static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
6677{
6678 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6679 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6680 int length;
6681
6682 ENTER;
6683 ipr_scsi_bus_speed_limit(ioa_cfg);
6684 ipr_check_term_power(ioa_cfg, mode_pages);
6685 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
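/* Note: the mode data length byte is reserved in MODE SELECT parameter
 * data, so hdr.length is cleared before the pages are sent back to the
 * IOA; the real transfer length is carried in the CDB instead. */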
6686 length = mode_pages->hdr.length + 1;
6687 mode_pages->hdr.length = 0;
6688
6689 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6690 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6691 length);
6692
6693 ipr_cmd->job_step = ipr_set_supported_devs;
6694 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6695 struct ipr_resource_entry, queue);
6696 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6697
6698 LEAVE;
6699 return IPR_RC_JOB_RETURN;
6700}
6701
6702/**
6703 * ipr_build_mode_sense - Builds a mode sense command
6704 * @ipr_cmd: ipr command struct
6705 * @res_handle: resource handle to send command to
6706 * @parm: Byte 2 of mode sense command
6707 * @dma_addr: DMA address of mode sense buffer
6708 * @xfer_len: Size of DMA buffer
6709 *
6710 * Return value:
6711 * none
6712 **/
6713static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
6714 __be32 res_handle,
6715 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
6716 {
6717 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6718
6719 ioarcb->res_handle = res_handle;
6720 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
6721 ioarcb->cmd_pkt.cdb[2] = parm;
6722 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6723 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6724
6725 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
6726}
6727
6728/**
6729 * ipr_reset_cmd_failed - Handle failure of IOA reset command
6730 * @ipr_cmd: ipr command struct
6731 *
6732 * This function handles the failure of an IOA bringup command.
6733 *
6734 * Return value:
6735 * IPR_RC_JOB_RETURN
6736 **/
6737static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
6738{
6739 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6740 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6741
6742 dev_err(&ioa_cfg->pdev->dev,
6743 "0x%02X failed with IOASC: 0x%08X\n",
6744 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
6745
6746 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6747 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6748 return IPR_RC_JOB_RETURN;
6749}
6750
6751/**
6752 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
6753 * @ipr_cmd: ipr command struct
6754 *
6755 * This function handles the failure of a Mode Sense to the IOAFP.
6756 * Some adapters do not handle all mode pages.
6757 *
6758 * Return value:
6759 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6760 **/
6761static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
6762{
6763 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6764 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6765
6766 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6767 ipr_cmd->job_step = ipr_set_supported_devs;
6768 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6769 struct ipr_resource_entry, queue);
6770 return IPR_RC_JOB_CONTINUE;
6771 }
6772
6773 return ipr_reset_cmd_failed(ipr_cmd);
6774}
6775
6776/**
6777 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
6778 * @ipr_cmd: ipr command struct
6779 *
6780 * This function sends a Page 28 mode sense to the IOA to
6781 * retrieve SCSI bus attributes.
6782 *
6783 * Return value:
6784 * IPR_RC_JOB_RETURN
6785 **/
6786static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
6787{
6788 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6789
6790 ENTER;
6791 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6792 0x28, ioa_cfg->vpd_cbs_dma +
6793 offsetof(struct ipr_misc_cbs, mode_pages),
6794 sizeof(struct ipr_mode_pages));
6795
6796 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
dfed823e 6797 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
1da177e4
LT
6798
6799 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6800
6801 LEAVE;
6802 return IPR_RC_JOB_RETURN;
6803}
6804
/**
 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function enables dual IOA RAID support if possible.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
	struct ipr_mode_page24 *mode_page;
	int length;

	ENTER;
	mode_page = ipr_get_mode_page(mode_pages, 0x24,
				      sizeof(struct ipr_mode_page24));

	if (mode_page)
		mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;

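	/*
	 * The transfer length is the mode parameter header's length field
	 * plus one byte for the length field itself; the length field is
	 * reserved on a mode select, so zero it before sending the data
	 * back to the adapter.
	 */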
	length = mode_pages->hdr.length + 1;
	mode_pages->hdr.length = 0;

	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
			      length);

	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
 * @ipr_cmd:	ipr command struct
 *
 * This function handles the failure of a Mode Sense to the IOAFP.
 * Some adapters do not handle all mode pages.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
{
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
		return IPR_RC_JOB_CONTINUE;
	}

	return ipr_reset_cmd_failed(ipr_cmd);
}

/**
 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a mode sense to the IOA to retrieve
 * the IOA Advanced Function Control mode page.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
			     0x24, ioa_cfg->vpd_cbs_dma +
			     offsetof(struct ipr_misc_cbs, mode_pages),
			     sizeof(struct ipr_mode_pages));

	ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
	ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_init_res_table - Initialize the resource table
 * @ipr_cmd:	ipr command struct
 *
 * This function looks through the existing resource table, comparing
 * it with the config table. This function will take care of old/new
 * devices and schedule adding/removing them from the mid-layer
 * as appropriate.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res, *temp;
	struct ipr_config_table_entry_wrapper cfgtew;
	int entries, found, flag, i;
	LIST_HEAD(old_res);

	ENTER;
	if (ioa_cfg->sis64)
		flag = ioa_cfg->u.cfg_table64->hdr64.flags;
	else
		flag = ioa_cfg->u.cfg_table->hdr.flags;

	if (flag & IPR_UCODE_DOWNLOAD_REQ)
		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");

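	/*
	 * Park every known resource on old_res. Entries still present in
	 * the new config table are moved back to used_res_q below; whatever
	 * remains on old_res afterwards has disappeared and is either
	 * flagged for removal from the mid-layer or returned to the free
	 * list.
	 */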
	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
		list_move_tail(&res->queue, &old_res);

	if (ioa_cfg->sis64)
		entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
	else
		entries = ioa_cfg->u.cfg_table->hdr.num_entries;

	for (i = 0; i < entries; i++) {
		if (ioa_cfg->sis64)
			cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
		else
			cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
		found = 0;

		list_for_each_entry_safe(res, temp, &old_res, queue) {
			if (ipr_is_same_device(res, &cfgtew)) {
				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
				found = 1;
				break;
			}
		}

		if (!found) {
			if (list_empty(&ioa_cfg->free_res_q)) {
				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
				break;
			}

			found = 1;
			res = list_entry(ioa_cfg->free_res_q.next,
					 struct ipr_resource_entry, queue);
			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
			ipr_init_res_entry(res, &cfgtew);
			res->add_to_ml = 1;
		} else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
			res->sdev->allow_restart = 1;

		if (found)
			ipr_update_res_entry(res, &cfgtew);
	}

	list_for_each_entry_safe(res, temp, &old_res, queue) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
		}
	}

	list_for_each_entry_safe(res, temp, &old_res, queue) {
		ipr_clear_res_target(res);
		list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	}

	if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
		ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
	else
		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Query IOA Configuration command
 * to the adapter to retrieve the IOA configuration table.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;

	ENTER;
	if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
		ioa_cfg->dual_raid = 1;
	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
		 ucode_vpd->major_release, ucode_vpd->card_type,
		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
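	/* The allocation length is a 24-bit big-endian value in CDB bytes 6-8 */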
	ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
	ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;

	ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
		       IPR_IOADL_FLAGS_READ_LAST);

	ipr_cmd->job_step = ipr_init_res_table;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 * @flags:	byte 1 of the inquiry CDB
 * @page:	inquiry page code to request
 * @dma_addr:	DMA address of the inquiry buffer
 * @xfer_len:	size of the DMA buffer
 *
 * This utility function sends an inquiry to the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
			      dma_addr_t dma_addr, u8 xfer_len)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ENTER;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
	ioarcb->cmd_pkt.cdb[1] = flags;
	ioarcb->cmd_pkt.cdb[2] = page;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;

	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
	LEAVE;
}

/**
 * ipr_inquiry_page_supported - Is the given inquiry page supported
 * @page0:	inquiry page 0 buffer
 * @page:	page code.
 *
 * This function determines if the specified inquiry page is supported.
 *
 * Return value:
 *	1 if page is supported / 0 if not
 **/
static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
{
	int i;

	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
		if (page0->page[i] == page)
			return 1;

	return 0;
}

/**
 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 0xD0 inquiry to the adapter
 * to retrieve adapter capabilities.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
	memset(cap, 0, sizeof(*cap));

	if (ipr_inquiry_page_supported(page0, 0xD0)) {
		ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
				  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
				  sizeof(struct ipr_inquiry_cap));
		return IPR_RC_JOB_RETURN;
	}

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 3 inquiry to the adapter
 * to retrieve software VPD information.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;

	ipr_cmd->job_step = ipr_ioafp_cap_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
			  sizeof(struct ipr_inquiry_page3));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 0 inquiry to the adapter
 * to retrieve supported inquiry pages.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	char type[5];

	ENTER;

	/* Grab the type out of the VPD and store it away */
	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
	type[4] = '\0';
	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);

	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
			  sizeof(struct ipr_inquiry_page0));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a standard inquiry to the adapter.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
			  sizeof(struct ipr_ioa_vpd));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends an Identify Host Request Response Queue
 * command to establish the HRRQ with the adapter.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ENTER;
	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");

	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	if (ioa_cfg->sis64)
		ioarcb->cmd_pkt.cdb[1] = 0x1;
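	/*
	 * The low 32 bits of the HRRQ DMA address go in CDB bytes 2-5,
	 * most significant byte first; SIS-64 adapters take the upper
	 * 32 bits in bytes 10-13 below. Bytes 7-8 carry the queue size.
	 */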
	ioarcb->cmd_pkt.cdb[2] =
		((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
	ioarcb->cmd_pkt.cdb[3] =
		((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
	ioarcb->cmd_pkt.cdb[4] =
		((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[5] =
		((u64) ioa_cfg->host_rrq_dma) & 0xff;
	ioarcb->cmd_pkt.cdb[7] =
		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[8] =
		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;

	if (ioa_cfg->sis64) {
		ioarcb->cmd_pkt.cdb[10] =
			((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
		ioarcb->cmd_pkt.cdb[11] =
			((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
		ioarcb->cmd_pkt.cdb[12] =
			((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
		ioarcb->cmd_pkt.cdb[13] =
			((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
	}

	ipr_cmd->job_step = ipr_ioafp_std_inquiry;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_timer_done - Adapter reset timer function
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.
 *
 * Return value:
 * 	none
 **/
static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->reset_cmd == ipr_cmd) {
		list_del(&ipr_cmd->queue);
		ipr_cmd->done(ipr_cmd);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reset_start_timer - Start a timer for adapter reset job
 * @ipr_cmd:	ipr command struct
 * @timeout:	timeout value
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.
 *
 * Return value:
 * 	none
 **/
static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
				  unsigned long timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
	add_timer(&ipr_cmd->timer);
}

/**
 * ipr_init_ioa_mem - Initialize ioa_cfg control block
 * @ioa_cfg:	ioa cfg struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);

	/* Initialize Host RRQ pointers */
	ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
	ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
	ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
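	/*
	 * The toggle bit flips each time the circular response queue wraps,
	 * which is how the interrupt handler tells freshly posted entries
	 * apart from stale ones left over from the previous pass.
	 */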
	ioa_cfg->toggle_bit = 1;

	/* Zero out config table */
	memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
}

/**
 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
{
	unsigned long stage, stage_time;
	u32 feedback;
	volatile u32 int_reg;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u64 maskval = 0;

	feedback = readl(ioa_cfg->regs.init_feedback_reg);
	stage = feedback & IPR_IPL_INIT_STAGE_MASK;
	stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;

	ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);

	/* sanity check the stage_time value */
	if (stage_time == 0)
		stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
	else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
		stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
	else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
		stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;

	if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
		writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		stage_time = ioa_cfg->transop_timeout;
		ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
	} else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
		if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
			ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
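			/*
			 * Mask both interrupts with a single writeq:
			 * stage-change in the upper 32 bits and
			 * transition-to-operational in the lower 32 bits.
			 */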
			maskval = IPR_PCII_IPL_STAGE_CHANGE;
			maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
			writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
			return IPR_RC_JOB_CONTINUE;
		}
	}

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + stage_time * HZ;
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
	ipr_cmd->done = ipr_reset_ioa_job;
	add_timer(&ipr_cmd->timer);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_enable_ioa - Enable the IOA following a reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function reinitializes some control blocks and
 * enables destructive diagnostics on the adapter.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	volatile u32 int_reg;
	volatile u64 maskval;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
	ipr_init_ioa_mem(ioa_cfg);

	ioa_cfg->allow_interrupts = 1;
	if (ioa_cfg->sis64) {
		/* Set the adapter to the correct endian mode. */
		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
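		/* Read back so the posted MMIO write reaches the chip first */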
		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
	}

	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);

	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
		       ioa_cfg->regs.clr_interrupt_mask_reg32);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		return IPR_RC_JOB_CONTINUE;
	}

	/* Enable destructive diagnostics on IOA */
	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);

	if (ioa_cfg->sis64) {
		maskval = IPR_PCII_IPL_STAGE_CHANGE;
		maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
		writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
	} else
		writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);

	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);

	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");

	if (ioa_cfg->sis64) {
		ipr_cmd->job_step = ipr_reset_next_stage;
		return IPR_RC_JOB_CONTINUE;
	}

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
	ipr_cmd->done = ipr_reset_ioa_job;
	add_timer(&ipr_cmd->timer);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked when an adapter dump has run out
 * of processing time.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	if (ioa_cfg->sdt_state == GET_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	else if (ioa_cfg->sdt_state == READ_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;

	ioa_cfg->dump_timeout = 1;
	ipr_cmd->job_step = ipr_reset_alert;

	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_unit_check_no_data - Log a unit check/no data error log
 * @ioa_cfg:	ioa config struct
 *
 * Logs an error indicating the adapter unit checked, but for some
 * reason, we were unable to fetch the unit check buffer.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
{
	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
}

/**
 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
 * @ioa_cfg:	ioa config struct
 *
 * Fetches the unit check buffer from the adapter by clocking the data
 * through the mailbox register.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned long mailbox;
	struct ipr_hostrcb *hostrcb;
	struct ipr_uc_sdt sdt;
	int rc, length;
	u32 ioasc;

	mailbox = readl(ioa_cfg->ioa_mailbox);

	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));

	if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
	    ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
	    (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	/* Find length of the first sdt entry (UC buffer) */
	if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
		length = be32_to_cpu(sdt.entry[0].end_token);
	else
		length = (be32_to_cpu(sdt.entry[0].end_token) -
			  be32_to_cpu(sdt.entry[0].start_token)) &
			  IPR_FMT2_MBX_ADDR_MASK;

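	/* Borrow a free host RCB to hold the unit check data */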
	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
			     struct ipr_hostrcb, queue);
	list_del(&hostrcb->queue);
	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));

	rc = ipr_get_ldump_data_section(ioa_cfg,
					be32_to_cpu(sdt.entry[0].start_token),
					(__be32 *)&hostrcb->hcam,
					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));

	if (!rc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
		if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
		    ioa_cfg->sdt_state == GET_DUMP)
			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	} else
		ipr_unit_check_no_data(ioa_cfg);

	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
}

/**
 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function will call to get the unit check buffer.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ioa_cfg->ioa_unit_checked = 0;
	ipr_get_unit_check_buffer(ioa_cfg);
	ipr_cmd->job_step = ipr_reset_alert;
	ipr_reset_start_timer(ipr_cmd, 0);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_restore_cfg_space - Restore PCI config space.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function restores the saved PCI config space of
 * the adapter, fails all outstanding ops back to the callers, and
 * fetches the dump/unit check if applicable to this reset.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 int_reg;

	ENTER;
	ioa_cfg->pdev->state_saved = true;
	pci_restore_state(ioa_cfg->pdev);

	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		return IPR_RC_JOB_CONTINUE;
	}

	ipr_fail_all_ops(ioa_cfg);

	if (ioa_cfg->sis64) {
		/* Set the adapter to the correct endian mode. */
		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
	}

	if (ioa_cfg->ioa_unit_checked) {
		if (ioa_cfg->sis64) {
			ipr_cmd->job_step = ipr_reset_get_unit_check_job;
			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
			return IPR_RC_JOB_RETURN;
		} else {
			ioa_cfg->ioa_unit_checked = 0;
			ipr_get_unit_check_buffer(ioa_cfg);
			ipr_cmd->job_step = ipr_reset_alert;
			ipr_reset_start_timer(ipr_cmd, 0);
			return IPR_RC_JOB_RETURN;
		}
	}

	if (ioa_cfg->in_ioa_bringdown) {
		ipr_cmd->job_step = ipr_ioa_bringdown_done;
	} else {
		ipr_cmd->job_step = ipr_reset_enable_ioa;

		if (GET_DUMP == ioa_cfg->sdt_state) {
			ioa_cfg->sdt_state = READ_DUMP;
			ioa_cfg->dump_timeout = 0;
			if (ioa_cfg->sis64)
				ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
			else
				ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
			ipr_cmd->job_step = ipr_reset_wait_for_dump;
			schedule_work(&ioa_cfg->work_q);
			return IPR_RC_JOB_RETURN;
		}
	}

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_reset_bist_done - BIST has completed on the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: Unblock config space and resume the reset process.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	if (ioa_cfg->cfg_locked)
		pci_cfg_access_unlock(ioa_cfg->pdev);
	ioa_cfg->cfg_locked = 0;
	ipr_cmd->job_step = ipr_reset_restore_cfg_space;
	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_reset_start_bist - Run BIST on the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function runs BIST on the adapter, then delays 2 seconds.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = PCIBIOS_SUCCESSFUL;

	ENTER;
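	/*
	 * Chips with the MMIO BIST method start BIST through the uproc
	 * interrupt register; other chips use the standard PCI_BIST
	 * config byte.
	 */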
	if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
		writel(IPR_UPROCI_SIS64_START_BIST,
		       ioa_cfg->regs.set_uproc_interrupt_reg32);
	else
		rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);

	if (rc == PCIBIOS_SUCCESSFUL) {
		ipr_cmd->job_step = ipr_reset_bist_done;
		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
		rc = IPR_RC_JOB_RETURN;
	} else {
		if (ioa_cfg->cfg_locked)
			pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
		ioa_cfg->cfg_locked = 0;
		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		rc = IPR_RC_JOB_CONTINUE;
	}

	LEAVE;
	return rc;
}

/**
 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This clears PCI reset to the adapter and delays two seconds.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
{
	ENTER;
	pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
	ipr_cmd->job_step = ipr_reset_bist_done;
	ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This asserts PCI reset to the adapter.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct pci_dev *pdev = ioa_cfg->pdev;

	ENTER;
	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
	ipr_cmd->job_step = ipr_reset_slot_reset_done;
	ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_block_config_access_wait - Wait for permission to block config access
 * @ipr_cmd:	ipr command struct
 *
 * Description: This attempts to block config access to the IOA.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_CONTINUE;

	if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
		ioa_cfg->cfg_locked = 1;
		ipr_cmd->job_step = ioa_cfg->reset;
	} else {
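		/*
		 * Someone else holds config access; retry every
		 * IPR_CHECK_FOR_RESET_TIMEOUT until u.time_left runs out,
		 * then proceed with the reset regardless.
		 */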
		if (ipr_cmd->u.time_left) {
			rc = IPR_RC_JOB_RETURN;
			ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
			ipr_reset_start_timer(ipr_cmd,
					      IPR_CHECK_FOR_RESET_TIMEOUT);
		} else {
			ipr_cmd->job_step = ioa_cfg->reset;
			dev_err(&ioa_cfg->pdev->dev,
				"Timed out waiting to lock config access. Resetting anyway.\n");
		}
	}

	return rc;
}

/**
 * ipr_reset_block_config_access - Block config access to the IOA
 * @ipr_cmd:	ipr command struct
 *
 * Description: This attempts to block config access to the IOA
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
{
	ipr_cmd->ioa_cfg->cfg_locked = 0;
	ipr_cmd->job_step = ipr_reset_block_config_access_wait;
	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_reset_allowed - Query whether or not IOA can be reset
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 if reset not allowed / non-zero if reset is allowed
 **/
static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
{
	volatile u32 temp_reg;

	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
}

/**
 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function waits for adapter permission to run BIST,
 * then runs BIST. If the adapter does not give permission after a
 * reasonable time, we will reset the adapter anyway. The impact of
 * resetting the adapter without warning the adapter is the risk of
 * losing the persistent error log on the adapter. If the adapter is
 * reset while it is writing to the flash on the adapter, the flash
 * segment will have bad ECC and be zeroed.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_RETURN;

	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
	} else {
		ipr_cmd->job_step = ipr_reset_block_config_access;
		rc = IPR_RC_JOB_CONTINUE;
	}

	return rc;
}

/**
 * ipr_reset_alert - Alert the adapter of a pending reset
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function alerts the adapter that it will be reset.
 * If memory space is not currently enabled, proceed directly
 * to running BIST on the adapter. The timer must always be started
 * so we guarantee we do not run BIST from ipr_isr.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u16 cmd_reg;
	int rc;

	ENTER;
	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);

	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
	} else {
		ipr_cmd->job_step = ipr_reset_block_config_access;
	}

	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_ucode_download_done - Microcode download completion
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function unmaps the microcode download buffer.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
		     sglist->num_sg, DMA_TO_DEVICE);

	ipr_cmd->job_step = ipr_reset_alert;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_reset_ucode_download - Download microcode to the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function checks to see if there is microcode
 * to download to the adapter. If there is, a download is performed.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	ENTER;
	ipr_cmd->job_step = ipr_reset_alert;

	if (!sglist)
		return IPR_RC_JOB_CONTINUE;

	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;

	if (ioa_cfg->sis64)
		ipr_build_ucode_ioadl64(ipr_cmd, sglist);
	else
		ipr_build_ucode_ioadl(ipr_cmd, sglist);
	ipr_cmd->job_step = ipr_reset_ucode_download_done;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
		   IPR_WRITE_BUFFER_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_shutdown_ioa - Shutdown the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function issues an adapter shutdown of the
 * specified type to the specified adapter as part of the
 * adapter reset job.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
	unsigned long timeout;
	int rc = IPR_RC_JOB_CONTINUE;

	ENTER;
	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;

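		/* Pick a timeout appropriate for the requested shutdown type */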
		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
			timeout = IPR_SHUTDOWN_TIMEOUT;
		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
			timeout = IPR_INTERNAL_TIMEOUT;
		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
		else
			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);

		rc = IPR_RC_JOB_RETURN;
		ipr_cmd->job_step = ipr_reset_ucode_download;
	} else
		ipr_cmd->job_step = ipr_reset_alert;

	LEAVE;
	return rc;
}

/**
 * ipr_reset_ioa_job - Adapter reset job
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function is the job router for the adapter reset job.
 *
 * Return value:
 * 	none
 **/
static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
{
	u32 rc, ioasc;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

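	/*
	 * Run reset job steps back to back while they return
	 * IPR_RC_JOB_CONTINUE; a step that issues an async command or
	 * starts a timer returns IPR_RC_JOB_RETURN and the job resumes
	 * here when that command or timer completes.
	 */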
	do {
		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

		if (ioa_cfg->reset_cmd != ipr_cmd) {
			/*
			 * We are doing nested adapter resets and this is
			 * not the current reset job.
			 */
			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
			return;
		}

		if (IPR_IOASC_SENSE_KEY(ioasc)) {
			rc = ipr_cmd->job_step_failed(ipr_cmd);
			if (rc == IPR_RC_JOB_RETURN)
				return;
		}

		ipr_reinit_ipr_cmnd(ipr_cmd);
		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
		rc = ipr_cmd->job_step(ipr_cmd);
	} while (rc == IPR_RC_JOB_CONTINUE);
}

/**
 * _ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @job_step:		first job step of reset job
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter
 * starting at the selected job step.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				    int (*job_step) (struct ipr_cmnd *),
				    enum ipr_shutdown_type shutdown_type)
{
	struct ipr_cmnd *ipr_cmd;

	ioa_cfg->in_reset_reload = 1;
	ioa_cfg->allow_cmds = 0;
	scsi_block_requests(ioa_cfg->host);

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioa_cfg->reset_cmd = ipr_cmd;
	ipr_cmd->job_step = job_step;
	ipr_cmd->u.shutdown_type = shutdown_type;

	ipr_reset_ioa_job(ipr_cmd);
}

/**
 * ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				   enum ipr_shutdown_type shutdown_type)
{
	if (ioa_cfg->ioa_is_dead)
		return;

	if (ioa_cfg->in_reset_reload) {
		if (ioa_cfg->sdt_state == GET_DUMP)
			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
		else if (ioa_cfg->sdt_state == READ_DUMP)
			ioa_cfg->sdt_state = ABORT_DUMP;
	}

	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA taken offline - error recovery failed\n");

		ioa_cfg->reset_retries = 0;
		ioa_cfg->ioa_is_dead = 1;

		if (ioa_cfg->in_ioa_bringdown) {
			ioa_cfg->reset_cmd = NULL;
			ioa_cfg->in_reset_reload = 0;
			ipr_fail_all_ops(ioa_cfg);
			wake_up_all(&ioa_cfg->reset_wait_q);

			spin_unlock_irq(ioa_cfg->host->host_lock);
			scsi_unblock_requests(ioa_cfg->host);
			spin_lock_irq(ioa_cfg->host->host_lock);
			return;
		} else {
			ioa_cfg->in_ioa_bringdown = 1;
			shutdown_type = IPR_SHUTDOWN_NONE;
		}
	}

	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
				shutdown_type);
}

/**
 * ipr_reset_freeze - Hold off all I/O activity
 * @ipr_cmd:	ipr command struct
 *
 * Description: If the PCI slot is frozen, hold off all I/O
 * activity; then, as soon as the slot is available again,
 * initiate an adapter reset.
 */
static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
{
	/* Disallow new interrupts, avoid loop */
	ipr_cmd->ioa_cfg->allow_interrupts = 0;
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called to tell us that the PCI bus
 * is down. Can't do anything here, except put the device driver
 * into a holding pattern, waiting for the PCI bus to come back.
 */
static void ipr_pci_frozen(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}

/**
 * ipr_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 */
static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->needs_warm_reset)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	else
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
					IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called when the PCI bus has
 * permanently failed.
 */
static void ipr_pci_perm_failure(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
	ioa_cfg->in_ioa_bringdown = 1;
	ioa_cfg->allow_cmds = 0;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}

/**
 * ipr_pci_error_detected - Called when a PCI error is detected.
 * @pdev:	PCI device struct
 * @state:	PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return value:
 * 	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_frozen:
		ipr_pci_frozen(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		ipr_pci_perm_failure(pdev);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
 * @ioa_cfg:	ioa cfg struct
 *
 * Description: This is the second phase of adapter initialization.
 * This function takes care of initializing the adapter to the point
 * where it can accept new commands.
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
{
	int rc = 0;
	unsigned long host_lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
	if (ioa_cfg->needs_hard_reset) {
		ioa_cfg->needs_hard_reset = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	} else
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
					IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	if (ioa_cfg->ioa_is_dead) {
		rc = -EIO;
	} else if (ipr_invalid_adapter(ioa_cfg)) {
		if (!ipr_testmode)
			rc = -EIO;

		dev_err(&ioa_cfg->pdev->dev,
			"Adapter not supported in this hardware configuration.\n");
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	LEAVE;
	return rc;
}

/**
 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	none
 **/
static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		if (ioa_cfg->ipr_cmnd_list[i])
			pci_pool_free(ioa_cfg->ipr_cmd_pool,
				      ioa_cfg->ipr_cmnd_list[i],
				      ioa_cfg->ipr_cmnd_list_dma[i]);

		ioa_cfg->ipr_cmnd_list[i] = NULL;
	}

	if (ioa_cfg->ipr_cmd_pool)
		pci_pool_destroy(ioa_cfg->ipr_cmd_pool);

	ioa_cfg->ipr_cmd_pool = NULL;
}

/**
 * ipr_free_mem - Frees memory allocated for an adapter
 * @ioa_cfg:	ioa cfg struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	kfree(ioa_cfg->res_entries);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
	ipr_free_cmd_blks(ioa_cfg);
	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
	pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
			    ioa_cfg->u.cfg_table,
			    ioa_cfg->cfg_table_dma);

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		pci_free_consistent(ioa_cfg->pdev,
				    sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}

	ipr_free_dump(ioa_cfg);
	kfree(ioa_cfg->trace);
}

/**
 * ipr_free_all_resources - Free all allocated resources for an adapter.
 * @ioa_cfg:	ioa config struct
 *
 * This function frees all allocated resources for the
 * specified adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	ENTER;
	free_irq(pdev->irq, ioa_cfg);
	pci_disable_msi(pdev);
	iounmap(ioa_cfg->hdw_dma_regs);
	pci_release_regions(pdev);
	ipr_free_mem(ioa_cfg);
	scsi_host_put(ioa_cfg->host);
	pci_disable_device(pdev);
	LEAVE;
}

/**
 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -ENOMEM on allocation failure
 **/
static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	dma_addr_t dma_addr;
	int i;

	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
						sizeof(struct ipr_cmnd), 16, 0);

	if (!ioa_cfg->ipr_cmd_pool)
		return -ENOMEM;

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);

		if (!ipr_cmd) {
			ipr_free_cmd_blks(ioa_cfg);
			return -ENOMEM;
		}

		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;

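		/*
		 * Each command block carries the bus addresses of its own
		 * IOADL and IOASA, computed from the block's DMA handle;
		 * SIS-64 adapters use 64-bit fields for these.
		 */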
		ioarcb = &ipr_cmd->ioarcb;
		ipr_cmd->dma_addr = dma_addr;
		if (ioa_cfg->sis64)
			ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
		else
			ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);

		ioarcb->host_response_handle = cpu_to_be32(i << 2);
		if (ioa_cfg->sis64) {
			ioarcb->u.sis64_addr_data.data_ioadl_addr =
				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
			ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
		} else {
			ioarcb->write_ioadl_addr =
				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
			ioarcb->ioasa_host_pci_addr =
				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
		}
		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
		ipr_cmd->cmd_index = i;
		ipr_cmd->ioa_cfg = ioa_cfg;
		ipr_cmd->sense_buffer_dma = dma_addr +
			offsetof(struct ipr_cmnd, sense_buffer);

		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	}

	return 0;
}

/**
 * ipr_alloc_mem - Allocate memory for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / non-zero for error
 **/
static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i, rc = -ENOMEM;

	ENTER;
	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
				       ioa_cfg->max_devs_supported, GFP_KERNEL);

	if (!ioa_cfg->res_entries)
		goto out;

	if (ioa_cfg->sis64) {
		ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
					      BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
		ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
					     BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
		ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
					    BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
	}

	for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
	}

	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
						sizeof(struct ipr_misc_cbs),
						&ioa_cfg->vpd_cbs_dma);

	if (!ioa_cfg->vpd_cbs)
		goto out_free_res_entries;

	if (ipr_alloc_cmd_blks(ioa_cfg))
		goto out_free_vpd_cbs;

	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
						 sizeof(u32) * IPR_NUM_CMD_BLKS,
						 &ioa_cfg->host_rrq_dma);

	if (!ioa_cfg->host_rrq)
		goto out_ipr_free_cmd_blocks;

	ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
						    ioa_cfg->cfg_table_size,
						    &ioa_cfg->cfg_table_dma);

	if (!ioa_cfg->u.cfg_table)
		goto out_free_host_rrq;

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
							   sizeof(struct ipr_hostrcb),
							   &ioa_cfg->hostrcb_dma[i]);

		if (!ioa_cfg->hostrcb[i])
			goto out_free_hostrcb_dma;

		ioa_cfg->hostrcb[i]->hostrcb_dma =
			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
	}

	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);

	if (!ioa_cfg->trace)
		goto out_free_hostrcb_dma;

	rc = 0;
out:
	LEAVE;
	return rc;

out_free_hostrcb_dma:
	while (i-- > 0) {
		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}
	pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
			    ioa_cfg->u.cfg_table,
			    ioa_cfg->cfg_table_dma);
out_free_host_rrq:
	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
out_ipr_free_cmd_blocks:
	ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
	kfree(ioa_cfg->res_entries);
	goto out;
}

/**
 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	none
 **/
static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		ioa_cfg->bus_attr[i].bus = i;
		ioa_cfg->bus_attr[i].qas_enabled = 0;
		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
		else
			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
	}
}

8531/**
8532 * ipr_init_ioa_cfg - Initialize IOA config struct
8533 * @ioa_cfg: ioa config struct
8534 * @host: scsi host struct
8535 * @pdev: PCI dev struct
8536 *
8537 * Return value:
8538 * none
8539 **/
8540static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
8541 struct Scsi_Host *host, struct pci_dev *pdev)
8542{
8543 const struct ipr_interrupt_offsets *p;
8544 struct ipr_interrupts *t;
8545 void __iomem *base;
8546
8547 ioa_cfg->host = host;
8548 ioa_cfg->pdev = pdev;
8549 ioa_cfg->log_level = ipr_log_level;
3d1d0da6 8550 ioa_cfg->doorbell = IPR_DOORBELL;
1da177e4
LT
8551 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
8552 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
8553 sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
8554 sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
8555 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
8556 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
8557 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
8558 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
8559
8560 INIT_LIST_HEAD(&ioa_cfg->free_q);
8561 INIT_LIST_HEAD(&ioa_cfg->pending_q);
8562 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
8563 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
8564 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
8565 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
c4028958 8566 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
1da177e4 8567 init_waitqueue_head(&ioa_cfg->reset_wait_q);
95fecd90 8568 init_waitqueue_head(&ioa_cfg->msi_wait_q);
1da177e4
LT
8569 ioa_cfg->sdt_state = INACTIVE;
8570
8571 ipr_initialize_bus_attr(ioa_cfg);
3e7ebdfa 8572 ioa_cfg->max_devs_supported = ipr_max_devs;
1da177e4 8573
3e7ebdfa
WB
8574 if (ioa_cfg->sis64) {
8575 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
8576 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
8577 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
8578 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
8579 } else {
8580 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
8581 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
8582 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
8583 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
8584 }
1da177e4
LT
8585 host->max_channel = IPR_MAX_BUS_TO_SCAN;
8586 host->unique_id = host->host_no;
8587 host->max_cmd_len = IPR_MAX_CDB_LEN;
8588 pci_set_drvdata(pdev, ioa_cfg);
8589
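	/*
	 * Translate the chip-specific register offset table into absolute
	 * MMIO addresses against the ioremapped register base.
	 */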
	p = &ioa_cfg->chip_cfg->regs;
	t = &ioa_cfg->regs;
	base = ioa_cfg->hdw_dma_regs;

	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
	t->ioarrin_reg = base + p->ioarrin_reg;
	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;

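	/* SIS-64 chips expose additional registers for init feedback,
	 * dump access and endianness control. */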
	if (ioa_cfg->sis64) {
		t->init_feedback_reg = base + p->init_feedback_reg;
		t->dump_addr_reg = base + p->dump_addr_reg;
		t->dump_data_reg = base + p->dump_data_reg;
		t->endian_swap_reg = base + p->endian_swap_reg;
	}
}

/**
 * ipr_get_chip_info - Find adapter chip information
 * @dev_id: PCI device id struct
 *
 * Return value:
 * 	ptr to chip information on success / NULL on failure
 **/
static const struct ipr_chip_t * __devinit
ipr_get_chip_info(const struct pci_device_id *dev_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == dev_id->vendor &&
		    ipr_chip[i].device == dev_id->device)
			return &ipr_chip[i];
	return NULL;
}

/**
 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
 * @irq: interrupt number
 * @devp: pointer to the ioa config struct
 *
 * Description: Simply set the msi_received flag to 1 indicating that
 * Message Signaled Interrupts are supported.
 *
 * Return value:
 * 	IRQ_HANDLED
 **/
static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
	unsigned long lock_flags = 0;
	irqreturn_t rc = IRQ_HANDLED;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->msi_received = 1;
	wake_up(&ioa_cfg->msi_wait_q);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}

/**
 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 * @ioa_cfg: ioa config struct
 * @pdev: PCI device struct
 *
 * Description: The return value from pci_enable_msi() cannot always be
 * trusted. This routine sets up and initiates a test interrupt to determine
 * if the interrupt is received via the ipr_test_intr() service routine.
 * If the test fails, the driver will fall back to LSI.
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
				  struct pci_dev *pdev)
{
	int rc;
	volatile u32 int_reg;
	unsigned long lock_flags = 0;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->msi_received = 0;
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
	if (rc) {
		dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
		return rc;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);

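	/*
	 * Writing IPR_PCII_IO_DEBUG_ACKNOWLEDGE raises a test interrupt;
	 * the readl that follows flushes the posted MMIO write. We then
	 * wait up to one second (HZ) for ipr_test_intr() to set
	 * msi_received.
	 */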
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (!ioa_cfg->msi_received) {
		/* MSI test failed */
		dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
		rc = -EOPNOTSUPP;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "MSI test succeeded.\n");

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	free_irq(pdev->irq, ioa_cfg);

	LEAVE;

	return rc;
}

/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev: PCI device struct
 * @dev_id: PCI device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
				   const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct Scsi_Host *host;
	unsigned long ipr_regs_pci;
	void __iomem *ipr_regs;
	int rc = PCIBIOS_SUCCESSFUL;
	volatile u32 mask, uproc, interrupts;

	ENTER;

	if ((rc = pci_enable_device(pdev))) {
		dev_err(&pdev->dev, "Cannot enable adapter\n");
		goto out;
	}

	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);

	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

	if (!host) {
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
		rc = -ENOMEM;
		goto out_disable;
	}

	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
	ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
		      sata_port_info.flags, &ipr_sata_ops);

	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);

	if (!ioa_cfg->ipr_chip) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		goto out_scsi_host_put;
	}

	/* set SIS 32 or SIS 64 */
	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
	ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;

	if (ipr_transop_timeout)
		ioa_cfg->transop_timeout = ipr_transop_timeout;
	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
	else
		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;

	ioa_cfg->revid = pdev->revision;

	ipr_regs_pci = pci_resource_start(pdev, 0);

	rc = pci_request_regions(pdev, IPR_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	}

	ipr_regs = pci_ioremap_bar(pdev, 0);

	if (!ipr_regs) {
		dev_err(&pdev->dev,
			"Couldn't map memory range of registers\n");
		rc = -ENOMEM;
		goto out_release_regions;
	}

	ioa_cfg->hdw_dma_regs = ipr_regs;
	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

	ipr_init_ioa_cfg(ioa_cfg, host, pdev);

	pci_set_master(pdev);

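	/*
	 * SIS-64 adapters can DMA above 4GB; try a 64 bit mask first and
	 * fall back to 32 bit addressing if the platform rejects it.
	 */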
	if (ioa_cfg->sis64) {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
		if (rc < 0) {
			dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
			rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		}

	} else
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));

	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	/* Enable MSI style interrupts if they are supported. */
	if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
		rc = ipr_test_msi(ioa_cfg, pdev);
		if (rc == -EOPNOTSUPP)
			pci_disable_msi(pdev);
		else if (rc)
			goto out_msi_disable;
		else
			dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
	} else if (ipr_debug)
		dev_info(&pdev->dev, "Cannot enable MSI.\n");

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto out_msi_disable;
	}

	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

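	/*
	 * The config table size scales with max_devs_supported; the
	 * per-device entry format (and hence the size) differs between
	 * SIS-32 and SIS-64 adapters.
	 */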
	if (ioa_cfg->sis64)
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
					   + ((sizeof(struct ipr_config_table_entry64)
					       * ioa_cfg->max_devs_supported)));
	else
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
					   + ((sizeof(struct ipr_config_table_entry)
					       * ioa_cfg->max_devs_supported)));

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto out_msi_disable;
	}

	/*
	 * If HRRQ updated interrupt is not masked, or reset alert is set,
	 * the card is in an unknown state and needs a hard reset
	 */
	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
		ioa_cfg->needs_hard_reset = 1;
	if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
		ioa_cfg->needs_hard_reset = 1;
	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
		ioa_cfg->ioa_unit_checked = 1;

	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
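	/*
	 * MSI vectors are not shared between devices, so the handler can
	 * be registered exclusively when the MSI test passed; a legacy
	 * pin interrupt may be shared with other devices.
	 */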
	rc = request_irq(pdev->irq, ipr_isr,
			 ioa_cfg->msi_received ? 0 : IRQF_SHARED,
			 IPR_NAME, ioa_cfg);

	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}

	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
		ioa_cfg->needs_warm_reset = 1;
		ioa_cfg->reset = ipr_reset_slot_reset;
	} else
		ioa_cfg->reset = ipr_reset_start_bist;

	spin_lock(&ipr_driver_lock);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock(&ipr_driver_lock);

	LEAVE;
out:
	return rc;

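/* Error unwind: each label tears down one stage, in reverse setup order. */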
cleanup_nolog:
	ipr_free_mem(ioa_cfg);
out_msi_disable:
	pci_disable_msi(pdev);
cleanup_nomem:
	iounmap(ipr_regs);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
out_disable:
	pci_disable_device(pdev);
	goto out;
}

/**
 * ipr_scan_vsets - Scans for VSET devices
 * @ioa_cfg: ioa config struct
 *
 * Description: Since the VSET resources do not follow SAM in that we can have
 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
 *
 * Return value:
 * 	none
 **/
static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
{
	int target, lun;

	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
}

/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg: ioa config struct
 * @shutdown_type: shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	ENTER;
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	LEAVE;
}

/**
 * __ipr_remove - Remove a single adapter
 * @pdev: pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
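	/*
	 * If a reset/reload is already in flight, drop the lock and wait
	 * for it to finish before initiating the bringdown.
	 */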
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_work_sync(&ioa_cfg->work_q);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock(&ipr_driver_lock);
	list_del(&ioa_cfg->queue);
	spin_unlock(&ipr_driver_lock);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}

/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev: pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void __devexit ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}

/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev: PCI device struct
 * @dev_id: PCI device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int __devinit ipr_probe(struct pci_dev *pdev,
			       const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	scsi_scan_host(ioa_cfg->host);
	ipr_scan_vsets(ioa_cfg);
	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
	ioa_cfg->allow_ml_add_del = 1;
	ioa_cfg->host->max_channel = IPR_VSET_BUS;
	schedule_work(&ioa_cfg->work_q);
	return 0;
}

/**
 * ipr_shutdown - Shutdown handler.
 * @pdev: pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to flush the write cache.
 *
 * Return value:
 * 	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}

static struct pci_device_id ipr_pci_table[] __devinitdata = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);

static struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = __devexit_p(ipr_remove),
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};

/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd: ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb: notifier block
 * @event: system event (restart, halt, or power off)
 * @buf: unused
 *
 * Return value:
 * 	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock(&ipr_driver_lock);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->allow_cmds) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

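		/*
		 * Fire an IOA Shutdown (Prepare For Normal) command at the
		 * adapter; ipr_halt_done() simply returns the command block
		 * to the free queue on completion.
		 */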
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock(&ipr_driver_lock);

	return NOTIFY_OK;
}

static struct notifier_block ipr_notifier = {
	ipr_halt, NULL, 0
};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);