Commit | Line | Data |
---|---|---|
f92363d1 SR |
1 | /* |
2 | * This is the Fusion MPT base driver providing common API layer interface | |
3 | * for access to MPT (Message Passing Technology) firmware. | |
4 | * | |
5 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c | |
6 | * Copyright (C) 2012 LSI Corporation | |
7 | * (mailto:DL-MPTFusionLinux@lsi.com) | |
8 | * | |
9 | * This program is free software; you can redistribute it and/or | |
10 | * modify it under the terms of the GNU General Public License | |
11 | * as published by the Free Software Foundation; either version 2 | |
12 | * of the License, or (at your option) any later version. | |
13 | * | |
14 | * This program is distributed in the hope that it will be useful, | |
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
17 | * GNU General Public License for more details. | |
18 | * | |
19 | * NO WARRANTY | |
20 | * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR | |
21 | * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT | |
22 | * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, | |
23 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is | |
24 | * solely responsible for determining the appropriateness of using and | |
25 | * distributing the Program and assumes all risks associated with its | |
26 | * exercise of rights under this Agreement, including but not limited to | |
27 | * the risks and costs of program errors, damage to or loss of data, | |
28 | * programs or equipment, and unavailability or interruption of operations. | |
29 | ||
30 | * DISCLAIMER OF LIABILITY | |
31 | * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY | |
32 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |
33 | * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND | |
34 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR | |
35 | * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE | |
36 | * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED | |
37 | * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES | |
38 | ||
39 | * You should have received a copy of the GNU General Public License | |
40 | * along with this program; if not, write to the Free Software | |
41 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, | |
42 | * USA. | |
43 | */ | |
44 | ||
f92363d1 SR |
45 | #include <linux/kernel.h> |
46 | #include <linux/module.h> | |
47 | #include <linux/errno.h> | |
48 | #include <linux/init.h> | |
49 | #include <linux/slab.h> | |
50 | #include <linux/types.h> | |
51 | #include <linux/pci.h> | |
52 | #include <linux/kdev_t.h> | |
53 | #include <linux/blkdev.h> | |
54 | #include <linux/delay.h> | |
55 | #include <linux/interrupt.h> | |
56 | #include <linux/dma-mapping.h> | |
57 | #include <linux/io.h> | |
58 | #include <linux/time.h> | |
59 | #include <linux/kthread.h> | |
60 | #include <linux/aer.h> | |
61 | ||
62 | ||
63 | #include "mpt3sas_base.h" | |
64 | ||
/* Table of registered reply handlers, indexed by driver-assigned cb_idx. */
static MPT_CALLBACK	mpt_callbacks[MPT_MAX_CALLBACKS];


#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */

/* maximum controller queue depth */
#define MAX_HBA_QUEUE_DEPTH	30000
#define MAX_CHAIN_DEPTH		100000

/* -1 = unset by user; the driver presumably picks its own default (not
 * shown in this chunk -- confirm against the init path). */
static int max_queue_depth = -1;
module_param(max_queue_depth, int, 0);
MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");

static int max_sgl_entries = -1;
module_param(max_sgl_entries, int, 0);
MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");

static int msix_disable = -1;
module_param(msix_disable, int, 0);
MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");


/* Registered via module_param_call() below so writes are propagated to
 * every ioc on the fly (see _scsih_set_fwfault_debug). */
static int mpt3sas_fwfault_debug;
MODULE_PARM_DESC(mpt3sas_fwfault_debug,
	" enable detection of firmware fault and halt firmware - (default=0)");
90 | ||
91 | /** | |
92 | * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug. | |
93 | * | |
94 | */ | |
95 | static int | |
96 | _scsih_set_fwfault_debug(const char *val, struct kernel_param *kp) | |
97 | { | |
98 | int ret = param_set_int(val, kp); | |
99 | struct MPT3SAS_ADAPTER *ioc; | |
100 | ||
101 | if (ret) | |
102 | return ret; | |
103 | ||
104 | pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug); | |
105 | list_for_each_entry(ioc, &mpt3sas_ioc_list, list) | |
106 | ioc->fwfault_debug = mpt3sas_fwfault_debug; | |
107 | return 0; | |
108 | } | |
109 | module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug, | |
110 | param_get_int, &mpt3sas_fwfault_debug, 0644); | |
111 | ||
112 | /** | |
113 | * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc | |
114 | * @arg: input argument, used to derive ioc | |
115 | * | |
116 | * Return 0 if controller is removed from pci subsystem. | |
117 | * Return -1 for other case. | |
118 | */ | |
119 | static int mpt3sas_remove_dead_ioc_func(void *arg) | |
120 | { | |
121 | struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg; | |
122 | struct pci_dev *pdev; | |
123 | ||
124 | if ((ioc == NULL)) | |
125 | return -1; | |
126 | ||
127 | pdev = ioc->pdev; | |
128 | if ((pdev == NULL)) | |
129 | return -1; | |
130 | pci_stop_and_remove_bus_device(pdev); | |
131 | return 0; | |
132 | } | |
133 | ||
/**
 * _base_fault_reset_work - workq handling ioc fault conditions
 * @work: input argument, used to derive ioc
 * Context: sleep.
 *
 * Periodic poller (re-armed every FAULT_POLLING_INTERVAL ms): reads the
 * doorbell state and either removes a dead controller, hard-resets a
 * non-operational one, or simply re-arms itself.
 *
 * Return nothing.
 */
static void
_base_fault_reset_work(struct work_struct *work)
{
	struct MPT3SAS_ADAPTER *ioc =
	    container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
	unsigned long flags;
	u32 doorbell;
	int rc;
	struct task_struct *p;


	/* skip this poll while a host reset is in progress; note that
	 * rearm_timer below is entered with this lock still held */
	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	if (ioc->shost_recovery)
		goto rearm_timer;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

	doorbell = mpt3sas_base_get_iocstate(ioc, 0);
	/* all state bits set: the controller is considered dead */
	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
		pr_err(MPT3SAS_FMT "SAS host is non-operational !!!!\n",
		    ioc->name);

		/*
		 * Call _scsih_flush_pending_cmds callback so that we flush all
		 * pending commands back to OS. This call is required to avoid
		 * deadlock at block layer. Dead IOC will fail to do diag reset,
		 * and this call is safe since dead ioc will never return any
		 * command back from HW.
		 */
		ioc->schedule_dead_ioc_flush_running_cmds(ioc);
		/*
		 * Set remove_host flag early since kernel thread will
		 * take some time to execute.
		 */
		ioc->remove_host = 1;
		/* Remove the Dead Host (kthread, since removal sleeps) */
		p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
		    "mpt3sas_dead_ioc_%d", ioc->id);
		if (IS_ERR(p))
			pr_err(MPT3SAS_FMT
			"%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
			ioc->name, __func__);
		else
			pr_err(MPT3SAS_FMT
			"%s: Running mpt3sas_dead_ioc thread success !!!!\n",
			ioc->name, __func__);
		return; /* don't rearm timer */
	}

	/* live but not operational: try a forced hard reset */
	if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
		rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
		    FORCE_BIG_HAMMER);
		pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name,
		    __func__, (rc == 0) ? "success" : "failed");
		doorbell = mpt3sas_base_get_iocstate(ioc, 0);
		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
			mpt3sas_base_fault_info(ioc, doorbell &
			    MPI2_DOORBELL_DATA_MASK);
		/* reset failed and still not operational: stop polling */
		if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_OPERATIONAL)
			return; /* don't rearm timer */
	}

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 rearm_timer:
	if (ioc->fault_reset_work_q)
		queue_delayed_work(ioc->fault_reset_work_q,
		    &ioc->fault_reset_work,
		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}
211 | ||
212 | /** | |
213 | * mpt3sas_base_start_watchdog - start the fault_reset_work_q | |
214 | * @ioc: per adapter object | |
215 | * Context: sleep. | |
216 | * | |
217 | * Return nothing. | |
218 | */ | |
219 | void | |
220 | mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc) | |
221 | { | |
222 | unsigned long flags; | |
223 | ||
224 | if (ioc->fault_reset_work_q) | |
225 | return; | |
226 | ||
227 | /* initialize fault polling */ | |
228 | ||
229 | INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work); | |
230 | snprintf(ioc->fault_reset_work_q_name, | |
231 | sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id); | |
232 | ioc->fault_reset_work_q = | |
233 | create_singlethread_workqueue(ioc->fault_reset_work_q_name); | |
234 | if (!ioc->fault_reset_work_q) { | |
235 | pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n", | |
236 | ioc->name, __func__, __LINE__); | |
237 | return; | |
238 | } | |
239 | spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); | |
240 | if (ioc->fault_reset_work_q) | |
241 | queue_delayed_work(ioc->fault_reset_work_q, | |
242 | &ioc->fault_reset_work, | |
243 | msecs_to_jiffies(FAULT_POLLING_INTERVAL)); | |
244 | spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); | |
245 | } | |
246 | ||
247 | /** | |
248 | * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q | |
249 | * @ioc: per adapter object | |
250 | * Context: sleep. | |
251 | * | |
252 | * Return nothing. | |
253 | */ | |
254 | void | |
255 | mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc) | |
256 | { | |
257 | unsigned long flags; | |
258 | struct workqueue_struct *wq; | |
259 | ||
260 | spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); | |
261 | wq = ioc->fault_reset_work_q; | |
262 | ioc->fault_reset_work_q = NULL; | |
263 | spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); | |
264 | if (wq) { | |
265 | if (!cancel_delayed_work(&ioc->fault_reset_work)) | |
266 | flush_workqueue(wq); | |
267 | destroy_workqueue(wq); | |
268 | } | |
269 | } | |
270 | ||
271 | /** | |
272 | * mpt3sas_base_fault_info - verbose translation of firmware FAULT code | |
273 | * @ioc: per adapter object | |
274 | * @fault_code: fault code | |
275 | * | |
276 | * Return nothing. | |
277 | */ | |
278 | void | |
279 | mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code) | |
280 | { | |
281 | pr_err(MPT3SAS_FMT "fault_state(0x%04x)!\n", | |
282 | ioc->name, fault_code); | |
283 | } | |
284 | ||
/**
 * mpt3sas_halt_firmware - halt's mpt controller firmware
 * @ioc: per adapter object
 *
 * For debugging timeout related issues.  Writing 0xC0FFEE00
 * to the doorbell register will halt controller firmware. With
 * the purpose to stop both driver and firmware, the enduser can
 * obtain a ring buffer from controller UART.
 */
void
mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
{
	u32 doorbell;

	/* only active when the fwfault_debug module parameter is set */
	if (!ioc->fwfault_debug)
		return;

	dump_stack();

	doorbell = readl(&ioc->chip->Doorbell);
	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
		mpt3sas_base_fault_info(ioc , doorbell);
	else {
		/* magic value that halts the firmware (see kernel-doc above) */
		writel(0xC0FFEE00, &ioc->chip->Doorbell);
		pr_err(MPT3SAS_FMT "Firmware is halted due to command timeout\n",
			ioc->name);
	}

	/* fwfault_debug == 2: spin forever so the state can be inspected
	 * from the UART/debugger; otherwise panic the host as well */
	if (ioc->fwfault_debug == 2)
		for (;;)
			;
	else
		panic("panic in %s\n", __func__);
}
319 | ||
320 | #ifdef CONFIG_SCSI_MPT3SAS_LOGGING | |
/**
 * _base_sas_ioc_info - verbose translation of the ioc status
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @request_hdr: request mf
 *
 * Decodes the IOCStatus of a reply into a human-readable string and,
 * when one is found, logs it together with a hex dump of the request
 * frame that triggered it.
 *
 * Return nothing.
 */
static void
_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
	MPI2RequestHeader_t *request_hdr)
{
	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	char *desc = NULL;
	u16 frame_sz;
	char *func_str = NULL;

	/* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
	    request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
	    request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
		return;

	/* CONFIG_INVALID_PAGE is deliberately not reported */
	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return;

	switch (ioc_status) {

/****************************************************************************
*  Common IOCStatus values for all replies
****************************************************************************/

	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc = "invalid function";
		break;
	case MPI2_IOCSTATUS_BUSY:
		desc = "busy";
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		desc = "invalid sgl";
		break;
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
		desc = "internal error";
		break;
	case MPI2_IOCSTATUS_INVALID_VPID:
		desc = "invalid vpid";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		desc = "insufficient resources";
		break;
	case MPI2_IOCSTATUS_INVALID_FIELD:
		desc = "invalid field";
		break;
	case MPI2_IOCSTATUS_INVALID_STATE:
		desc = "invalid state";
		break;
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
		desc = "op state not supported";
		break;

/****************************************************************************
*  Config IOCStatus values
****************************************************************************/

	case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
		desc = "config invalid action";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
		desc = "config invalid type";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
		desc = "config invalid page";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
		desc = "config invalid data";
		break;
	case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
		desc = "config no defaults";
		break;
	case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
		desc = "config cant commit";
		break;

/****************************************************************************
*  SCSI IO Reply
****************************************************************************/

	/* these statuses intentionally produce no text (desc stays NULL,
	 * so the function returns silently below) */
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		break;

/****************************************************************************
*  For use by SCSI Initiator and SCSI Target end-to-end data protection
****************************************************************************/

	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc = "eedp app tag error";
		break;

/****************************************************************************
*  SCSI Target values
****************************************************************************/

	case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
		desc = "target invalid io index";
		break;
	case MPI2_IOCSTATUS_TARGET_ABORTED:
		desc = "target aborted";
		break;
	case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
		desc = "target no conn retryable";
		break;
	case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
		desc = "target no connection";
		break;
	case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
		desc = "target xfer count mismatch";
		break;
	case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
		desc = "target data offset error";
		break;
	case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
		desc = "target too much write data";
		break;
	case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
		desc = "target iu too short";
		break;
	case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
		desc = "target ack nak timeout";
		break;
	case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
		desc = "target nak received";
		break;

/****************************************************************************
*  Serial Attached SCSI values
****************************************************************************/

	case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
		desc = "smp request failed";
		break;
	case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
		desc = "smp data overrun";
		break;

/****************************************************************************
*  Diagnostic Buffer Post / Diagnostic Release values
****************************************************************************/

	case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
		desc = "diagnostic released";
		break;
	default:
		break;
	}

	if (!desc)
		return;

	/* choose a frame size and label so the hex dump below covers the
	 * whole request for the given function */
	switch (request_hdr->Function) {
	case MPI2_FUNCTION_CONFIG:
		frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
		func_str = "config_page";
		break;
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
		func_str = "task_mgmt";
		break;
	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
		frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
		func_str = "sas_iounit_ctl";
		break;
	case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
		frame_sz = sizeof(Mpi2SepRequest_t);
		func_str = "enclosure";
		break;
	case MPI2_FUNCTION_IOC_INIT:
		frame_sz = sizeof(Mpi2IOCInitRequest_t);
		func_str = "ioc_init";
		break;
	case MPI2_FUNCTION_PORT_ENABLE:
		frame_sz = sizeof(Mpi2PortEnableRequest_t);
		func_str = "port_enable";
		break;
	case MPI2_FUNCTION_SMP_PASSTHROUGH:
		frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
		func_str = "smp_passthru";
		break;
	default:
		frame_sz = 32;
		func_str = "unknown";
		break;
	}

	pr_warn(MPT3SAS_FMT "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
		ioc->name, desc, ioc_status, request_hdr, func_str);

	/* dump in 32-bit words */
	_debug_dump_mf(request_hdr, frame_sz/4);
}
537 | ||
/**
 * _base_display_event_data - verbose translation of firmware asyn events
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 *
 * Prints a short description of the async event when MPT_DEBUG_EVENTS
 * logging is enabled; unknown events are silently ignored.
 *
 * Return nothing.
 */
static void
_base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventNotificationReply_t *mpi_reply)
{
	char *desc = NULL;
	u16 event;

	/* gated by the MPT_DEBUG_EVENTS logging level bit */
	if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
		return;

	event = le16_to_cpu(mpi_reply->Event);

	switch (event) {
	case MPI2_EVENT_LOG_DATA:
		desc = "Log Data";
		break;
	case MPI2_EVENT_STATE_CHANGE:
		desc = "Status Change";
		break;
	case MPI2_EVENT_HARD_RESET_RECEIVED:
		desc = "Hard Reset Received";
		break;
	case MPI2_EVENT_EVENT_CHANGE:
		desc = "Event Change";
		break;
	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
		desc = "Device Status Change";
		break;
	case MPI2_EVENT_IR_OPERATION_STATUS:
		desc = "IR Operation Status";
		break;
	case MPI2_EVENT_SAS_DISCOVERY:
	/* discovery gets a richer message (reason + status) and returns */
	{
		Mpi2EventDataSasDiscovery_t *event_data =
		    (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
		pr_info(MPT3SAS_FMT "Discovery: (%s)", ioc->name,
		    (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
		    "start" : "stop");
		if (event_data->DiscoveryStatus)
			pr_info("discovery_status(0x%08x)",
			    le32_to_cpu(event_data->DiscoveryStatus));
		pr_info("\n");
		return;
	}
	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
		desc = "SAS Broadcast Primitive";
		break;
	case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
		desc = "SAS Init Device Status Change";
		break;
	case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
		desc = "SAS Init Table Overflow";
		break;
	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		desc = "SAS Topology Change List";
		break;
	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
		desc = "SAS Enclosure Device Status Change";
		break;
	case MPI2_EVENT_IR_VOLUME:
		desc = "IR Volume";
		break;
	case MPI2_EVENT_IR_PHYSICAL_DISK:
		desc = "IR Physical Disk";
		break;
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		desc = "IR Configuration Change List";
		break;
	case MPI2_EVENT_LOG_ENTRY_ADDED:
		desc = "Log Entry Added";
		break;
	}

	/* unknown event: desc never assigned, nothing printed */
	if (!desc)
		return;

	pr_info(MPT3SAS_FMT "%s\n", ioc->name, desc);
}
623 | #endif | |
624 | ||
625 | /** | |
626 | * _base_sas_log_info - verbose translation of firmware log info | |
627 | * @ioc: per adapter object | |
628 | * @log_info: log info | |
629 | * | |
630 | * Return nothing. | |
631 | */ | |
632 | static void | |
633 | _base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info) | |
634 | { | |
635 | union loginfo_type { | |
636 | u32 loginfo; | |
637 | struct { | |
638 | u32 subcode:16; | |
639 | u32 code:8; | |
640 | u32 originator:4; | |
641 | u32 bus_type:4; | |
642 | } dw; | |
643 | }; | |
644 | union loginfo_type sas_loginfo; | |
645 | char *originator_str = NULL; | |
646 | ||
647 | sas_loginfo.loginfo = log_info; | |
648 | if (sas_loginfo.dw.bus_type != 3 /*SAS*/) | |
649 | return; | |
650 | ||
651 | /* each nexus loss loginfo */ | |
652 | if (log_info == 0x31170000) | |
653 | return; | |
654 | ||
655 | /* eat the loginfos associated with task aborts */ | |
656 | if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info == | |
657 | 0x31140000 || log_info == 0x31130000)) | |
658 | return; | |
659 | ||
660 | switch (sas_loginfo.dw.originator) { | |
661 | case 0: | |
662 | originator_str = "IOP"; | |
663 | break; | |
664 | case 1: | |
665 | originator_str = "PL"; | |
666 | break; | |
667 | case 2: | |
668 | originator_str = "IR"; | |
669 | break; | |
670 | } | |
671 | ||
672 | pr_warn(MPT3SAS_FMT | |
673 | "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n", | |
674 | ioc->name, log_info, | |
675 | originator_str, sas_loginfo.dw.code, | |
676 | sas_loginfo.dw.subcode); | |
677 | } | |
678 | ||
679 | /** | |
680 | * _base_display_reply_info - | |
681 | * @ioc: per adapter object | |
682 | * @smid: system request message index | |
683 | * @msix_index: MSIX table index supplied by the OS | |
684 | * @reply: reply message frame(lower 32bit addr) | |
685 | * | |
686 | * Return nothing. | |
687 | */ | |
688 | static void | |
689 | _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, | |
690 | u32 reply) | |
691 | { | |
692 | MPI2DefaultReply_t *mpi_reply; | |
693 | u16 ioc_status; | |
694 | u32 loginfo = 0; | |
695 | ||
696 | mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); | |
697 | if (unlikely(!mpi_reply)) { | |
698 | pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n", | |
699 | ioc->name, __FILE__, __LINE__, __func__); | |
700 | return; | |
701 | } | |
702 | ioc_status = le16_to_cpu(mpi_reply->IOCStatus); | |
703 | #ifdef CONFIG_SCSI_MPT3SAS_LOGGING | |
704 | if ((ioc_status & MPI2_IOCSTATUS_MASK) && | |
705 | (ioc->logging_level & MPT_DEBUG_REPLY)) { | |
706 | _base_sas_ioc_info(ioc , mpi_reply, | |
707 | mpt3sas_base_get_msg_frame(ioc, smid)); | |
708 | } | |
709 | #endif | |
710 | if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) { | |
711 | loginfo = le32_to_cpu(mpi_reply->IOCLogInfo); | |
712 | _base_sas_log_info(ioc, loginfo); | |
713 | } | |
714 | ||
715 | if (ioc_status || loginfo) { | |
716 | ioc_status &= MPI2_IOCSTATUS_MASK; | |
717 | mpt3sas_trigger_mpi(ioc, ioc_status, loginfo); | |
718 | } | |
719 | } | |
720 | ||
721 | /** | |
722 | * mpt3sas_base_done - base internal command completion routine | |
723 | * @ioc: per adapter object | |
724 | * @smid: system request message index | |
725 | * @msix_index: MSIX table index supplied by the OS | |
726 | * @reply: reply message frame(lower 32bit addr) | |
727 | * | |
728 | * Return 1 meaning mf should be freed from _base_interrupt | |
729 | * 0 means the mf is freed from this function. | |
730 | */ | |
731 | u8 | |
732 | mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, | |
733 | u32 reply) | |
734 | { | |
735 | MPI2DefaultReply_t *mpi_reply; | |
736 | ||
737 | mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); | |
738 | if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK) | |
739 | return 1; | |
740 | ||
741 | if (ioc->base_cmds.status == MPT3_CMD_NOT_USED) | |
742 | return 1; | |
743 | ||
744 | ioc->base_cmds.status |= MPT3_CMD_COMPLETE; | |
745 | if (mpi_reply) { | |
746 | ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID; | |
747 | memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4); | |
748 | } | |
749 | ioc->base_cmds.status &= ~MPT3_CMD_PENDING; | |
750 | ||
751 | complete(&ioc->base_cmds.done); | |
752 | return 1; | |
753 | } | |
754 | ||
755 | /** | |
756 | * _base_async_event - main callback handler for firmware asyn events | |
757 | * @ioc: per adapter object | |
758 | * @msix_index: MSIX table index supplied by the OS | |
759 | * @reply: reply message frame(lower 32bit addr) | |
760 | * | |
761 | * Return 1 meaning mf should be freed from _base_interrupt | |
762 | * 0 means the mf is freed from this function. | |
763 | */ | |
764 | static u8 | |
765 | _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply) | |
766 | { | |
767 | Mpi2EventNotificationReply_t *mpi_reply; | |
768 | Mpi2EventAckRequest_t *ack_request; | |
769 | u16 smid; | |
770 | ||
771 | mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); | |
772 | if (!mpi_reply) | |
773 | return 1; | |
774 | if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION) | |
775 | return 1; | |
776 | #ifdef CONFIG_SCSI_MPT3SAS_LOGGING | |
777 | _base_display_event_data(ioc, mpi_reply); | |
778 | #endif | |
779 | if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED)) | |
780 | goto out; | |
781 | smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); | |
782 | if (!smid) { | |
783 | pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", | |
784 | ioc->name, __func__); | |
785 | goto out; | |
786 | } | |
787 | ||
788 | ack_request = mpt3sas_base_get_msg_frame(ioc, smid); | |
789 | memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t)); | |
790 | ack_request->Function = MPI2_FUNCTION_EVENT_ACK; | |
791 | ack_request->Event = mpi_reply->Event; | |
792 | ack_request->EventContext = mpi_reply->EventContext; | |
793 | ack_request->VF_ID = 0; /* TODO */ | |
794 | ack_request->VP_ID = 0; | |
795 | mpt3sas_base_put_smid_default(ioc, smid); | |
796 | ||
797 | out: | |
798 | ||
799 | /* scsih callback handler */ | |
800 | mpt3sas_scsih_event_callback(ioc, msix_index, reply); | |
801 | ||
802 | /* ctl callback handler */ | |
803 | mpt3sas_ctl_event_callback(ioc, msix_index, reply); | |
804 | ||
805 | return 1; | |
806 | } | |
807 | ||
808 | /** | |
809 | * _base_get_cb_idx - obtain the callback index | |
810 | * @ioc: per adapter object | |
811 | * @smid: system request message index | |
812 | * | |
813 | * Return callback index. | |
814 | */ | |
815 | static u8 | |
816 | _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid) | |
817 | { | |
818 | int i; | |
819 | u8 cb_idx; | |
820 | ||
821 | if (smid < ioc->hi_priority_smid) { | |
822 | i = smid - 1; | |
823 | cb_idx = ioc->scsi_lookup[i].cb_idx; | |
824 | } else if (smid < ioc->internal_smid) { | |
825 | i = smid - ioc->hi_priority_smid; | |
826 | cb_idx = ioc->hpr_lookup[i].cb_idx; | |
827 | } else if (smid <= ioc->hba_queue_depth) { | |
828 | i = smid - ioc->internal_smid; | |
829 | cb_idx = ioc->internal_lookup[i].cb_idx; | |
830 | } else | |
831 | cb_idx = 0xFF; | |
832 | return cb_idx; | |
833 | } | |
834 | ||
835 | /** | |
836 | * _base_mask_interrupts - disable interrupts | |
837 | * @ioc: per adapter object | |
838 | * | |
839 | * Disabling ResetIRQ, Reply and Doorbell Interrupts | |
840 | * | |
841 | * Return nothing. | |
842 | */ | |
843 | static void | |
844 | _base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc) | |
845 | { | |
846 | u32 him_register; | |
847 | ||
848 | ioc->mask_interrupts = 1; | |
849 | him_register = readl(&ioc->chip->HostInterruptMask); | |
850 | him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK; | |
851 | writel(him_register, &ioc->chip->HostInterruptMask); | |
852 | readl(&ioc->chip->HostInterruptMask); | |
853 | } | |
854 | ||
855 | /** | |
856 | * _base_unmask_interrupts - enable interrupts | |
857 | * @ioc: per adapter object | |
858 | * | |
859 | * Enabling only Reply Interrupts | |
860 | * | |
861 | * Return nothing. | |
862 | */ | |
863 | static void | |
864 | _base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc) | |
865 | { | |
866 | u32 him_register; | |
867 | ||
868 | him_register = readl(&ioc->chip->HostInterruptMask); | |
869 | him_register &= ~MPI2_HIM_RIM; | |
870 | writel(him_register, &ioc->chip->HostInterruptMask); | |
871 | ioc->mask_interrupts = 0; | |
872 | } | |
873 | ||
/* 64-bit reply post descriptor, viewable as one word or two 32-bit
 * halves (used to detect the 0xFFFFFFFF "unused" pattern). */
union reply_descriptor {
	u64 word;
	struct {
		u32 low;
		u32 high;
	} u;
};
881 | ||
882 | /** | |
883 | * _base_interrupt - MPT adapter (IOC) specific interrupt handler. | |
884 | * @irq: irq number (not used) | |
885 | * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure | |
886 | * @r: pt_regs pointer (not used) | |
887 | * | |
888 | * Return IRQ_HANDLE if processed, else IRQ_NONE. | |
889 | */ | |
890 | static irqreturn_t | |
891 | _base_interrupt(int irq, void *bus_id) | |
892 | { | |
893 | struct adapter_reply_queue *reply_q = bus_id; | |
894 | union reply_descriptor rd; | |
895 | u32 completed_cmds; | |
896 | u8 request_desript_type; | |
897 | u16 smid; | |
898 | u8 cb_idx; | |
899 | u32 reply; | |
900 | u8 msix_index = reply_q->msix_index; | |
901 | struct MPT3SAS_ADAPTER *ioc = reply_q->ioc; | |
902 | Mpi2ReplyDescriptorsUnion_t *rpf; | |
903 | u8 rc; | |
904 | ||
905 | if (ioc->mask_interrupts) | |
906 | return IRQ_NONE; | |
907 | ||
908 | if (!atomic_add_unless(&reply_q->busy, 1, 1)) | |
909 | return IRQ_NONE; | |
910 | ||
911 | rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index]; | |
912 | request_desript_type = rpf->Default.ReplyFlags | |
913 | & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; | |
914 | if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) { | |
915 | atomic_dec(&reply_q->busy); | |
916 | return IRQ_NONE; | |
917 | } | |
918 | ||
919 | completed_cmds = 0; | |
920 | cb_idx = 0xFF; | |
921 | do { | |
922 | rd.word = le64_to_cpu(rpf->Words); | |
923 | if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX) | |
924 | goto out; | |
925 | reply = 0; | |
926 | smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1); | |
927 | if (request_desript_type == | |
928 | MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS || | |
929 | request_desript_type == | |
930 | MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) { | |
931 | cb_idx = _base_get_cb_idx(ioc, smid); | |
932 | if ((likely(cb_idx < MPT_MAX_CALLBACKS)) && | |
933 | (likely(mpt_callbacks[cb_idx] != NULL))) { | |
934 | rc = mpt_callbacks[cb_idx](ioc, smid, | |
935 | msix_index, 0); | |
936 | if (rc) | |
937 | mpt3sas_base_free_smid(ioc, smid); | |
938 | } | |
939 | } else if (request_desript_type == | |
940 | MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) { | |
941 | reply = le32_to_cpu( | |
942 | rpf->AddressReply.ReplyFrameAddress); | |
943 | if (reply > ioc->reply_dma_max_address || | |
944 | reply < ioc->reply_dma_min_address) | |
945 | reply = 0; | |
946 | if (smid) { | |
947 | cb_idx = _base_get_cb_idx(ioc, smid); | |
948 | if ((likely(cb_idx < MPT_MAX_CALLBACKS)) && | |
949 | (likely(mpt_callbacks[cb_idx] != NULL))) { | |
950 | rc = mpt_callbacks[cb_idx](ioc, smid, | |
951 | msix_index, reply); | |
952 | if (reply) | |
953 | _base_display_reply_info(ioc, | |
954 | smid, msix_index, reply); | |
955 | if (rc) | |
956 | mpt3sas_base_free_smid(ioc, | |
957 | smid); | |
958 | } | |
959 | } else { | |
960 | _base_async_event(ioc, msix_index, reply); | |
961 | } | |
962 | ||
963 | /* reply free queue handling */ | |
964 | if (reply) { | |
965 | ioc->reply_free_host_index = | |
966 | (ioc->reply_free_host_index == | |
967 | (ioc->reply_free_queue_depth - 1)) ? | |
968 | 0 : ioc->reply_free_host_index + 1; | |
969 | ioc->reply_free[ioc->reply_free_host_index] = | |
970 | cpu_to_le32(reply); | |
971 | wmb(); | |
972 | writel(ioc->reply_free_host_index, | |
973 | &ioc->chip->ReplyFreeHostIndex); | |
974 | } | |
975 | } | |
976 | ||
977 | rpf->Words = cpu_to_le64(ULLONG_MAX); | |
978 | reply_q->reply_post_host_index = | |
979 | (reply_q->reply_post_host_index == | |
980 | (ioc->reply_post_queue_depth - 1)) ? 0 : | |
981 | reply_q->reply_post_host_index + 1; | |
982 | request_desript_type = | |
983 | reply_q->reply_post_free[reply_q->reply_post_host_index]. | |
984 | Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; | |
985 | completed_cmds++; | |
986 | if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) | |
987 | goto out; | |
988 | if (!reply_q->reply_post_host_index) | |
989 | rpf = reply_q->reply_post_free; | |
990 | else | |
991 | rpf++; | |
992 | } while (1); | |
993 | ||
994 | out: | |
995 | ||
996 | if (!completed_cmds) { | |
997 | atomic_dec(&reply_q->busy); | |
998 | return IRQ_NONE; | |
999 | } | |
1000 | ||
1001 | wmb(); | |
1002 | writel(reply_q->reply_post_host_index | (msix_index << | |
1003 | MPI2_RPHI_MSIX_INDEX_SHIFT), &ioc->chip->ReplyPostHostIndex); | |
1004 | atomic_dec(&reply_q->busy); | |
1005 | return IRQ_HANDLED; | |
1006 | } | |
1007 | ||
1008 | /** | |
1009 | * _base_is_controller_msix_enabled - is controller support muli-reply queues | |
1010 | * @ioc: per adapter object | |
1011 | * | |
1012 | */ | |
1013 | static inline int | |
1014 | _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc) | |
1015 | { | |
1016 | return (ioc->facts.IOCCapabilities & | |
1017 | MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable; | |
1018 | } | |
1019 | ||
1020 | /** | |
1021 | * mpt3sas_base_flush_reply_queues - flushing the MSIX reply queues | |
1022 | * @ioc: per adapter object | |
1023 | * Context: ISR conext | |
1024 | * | |
1025 | * Called when a Task Management request has completed. We want | |
1026 | * to flush the other reply queues so all the outstanding IO has been | |
1027 | * completed back to OS before we process the TM completetion. | |
1028 | * | |
1029 | * Return nothing. | |
1030 | */ | |
1031 | void | |
1032 | mpt3sas_base_flush_reply_queues(struct MPT3SAS_ADAPTER *ioc) | |
1033 | { | |
1034 | struct adapter_reply_queue *reply_q; | |
1035 | ||
1036 | /* If MSIX capability is turned off | |
1037 | * then multi-queues are not enabled | |
1038 | */ | |
1039 | if (!_base_is_controller_msix_enabled(ioc)) | |
1040 | return; | |
1041 | ||
1042 | list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { | |
1043 | if (ioc->shost_recovery) | |
1044 | return; | |
1045 | /* TMs are on msix_index == 0 */ | |
1046 | if (reply_q->msix_index == 0) | |
1047 | continue; | |
1048 | _base_interrupt(reply_q->vector, (void *)reply_q); | |
1049 | } | |
1050 | } | |
1051 | ||
1052 | /** | |
1053 | * mpt3sas_base_release_callback_handler - clear interrupt callback handler | |
1054 | * @cb_idx: callback index | |
1055 | * | |
1056 | * Return nothing. | |
1057 | */ | |
1058 | void | |
1059 | mpt3sas_base_release_callback_handler(u8 cb_idx) | |
1060 | { | |
1061 | mpt_callbacks[cb_idx] = NULL; | |
1062 | } | |
1063 | ||
1064 | /** | |
1065 | * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler | |
1066 | * @cb_func: callback function | |
1067 | * | |
1068 | * Returns cb_func. | |
1069 | */ | |
1070 | u8 | |
1071 | mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func) | |
1072 | { | |
1073 | u8 cb_idx; | |
1074 | ||
1075 | for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--) | |
1076 | if (mpt_callbacks[cb_idx] == NULL) | |
1077 | break; | |
1078 | ||
1079 | mpt_callbacks[cb_idx] = cb_func; | |
1080 | return cb_idx; | |
1081 | } | |
1082 | ||
1083 | /** | |
1084 | * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler | |
1085 | * | |
1086 | * Return nothing. | |
1087 | */ | |
1088 | void | |
1089 | mpt3sas_base_initialize_callback_handler(void) | |
1090 | { | |
1091 | u8 cb_idx; | |
1092 | ||
1093 | for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++) | |
1094 | mpt3sas_base_release_callback_handler(cb_idx); | |
1095 | } | |
1096 | ||
1097 | ||
1098 | /** | |
1099 | * _base_build_zero_len_sge - build zero length sg entry | |
1100 | * @ioc: per adapter object | |
1101 | * @paddr: virtual address for SGE | |
1102 | * | |
1103 | * Create a zero length scatter gather entry to insure the IOCs hardware has | |
1104 | * something to use if the target device goes brain dead and tries | |
1105 | * to send data even when none is asked for. | |
1106 | * | |
1107 | * Return nothing. | |
1108 | */ | |
1109 | static void | |
1110 | _base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr) | |
1111 | { | |
1112 | u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT | | |
1113 | MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST | | |
1114 | MPI2_SGE_FLAGS_SIMPLE_ELEMENT) << | |
1115 | MPI2_SGE_FLAGS_SHIFT); | |
1116 | ioc->base_add_sg_single(paddr, flags_length, -1); | |
1117 | } | |
1118 | ||
1119 | /** | |
1120 | * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr. | |
1121 | * @paddr: virtual address for SGE | |
1122 | * @flags_length: SGE flags and data transfer length | |
1123 | * @dma_addr: Physical address | |
1124 | * | |
1125 | * Return nothing. | |
1126 | */ | |
1127 | static void | |
1128 | _base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr) | |
1129 | { | |
1130 | Mpi2SGESimple32_t *sgel = paddr; | |
1131 | ||
1132 | flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING | | |
1133 | MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT; | |
1134 | sgel->FlagsLength = cpu_to_le32(flags_length); | |
1135 | sgel->Address = cpu_to_le32(dma_addr); | |
1136 | } | |
1137 | ||
1138 | ||
1139 | /** | |
1140 | * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr. | |
1141 | * @paddr: virtual address for SGE | |
1142 | * @flags_length: SGE flags and data transfer length | |
1143 | * @dma_addr: Physical address | |
1144 | * | |
1145 | * Return nothing. | |
1146 | */ | |
1147 | static void | |
1148 | _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr) | |
1149 | { | |
1150 | Mpi2SGESimple64_t *sgel = paddr; | |
1151 | ||
1152 | flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING | | |
1153 | MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT; | |
1154 | sgel->FlagsLength = cpu_to_le32(flags_length); | |
1155 | sgel->Address = cpu_to_le64(dma_addr); | |
1156 | } | |
1157 | ||
1158 | /** | |
1159 | * _base_get_chain_buffer_tracker - obtain chain tracker | |
1160 | * @ioc: per adapter object | |
1161 | * @smid: smid associated to an IO request | |
1162 | * | |
1163 | * Returns chain tracker(from ioc->free_chain_list) | |
1164 | */ | |
1165 | static struct chain_tracker * | |
1166 | _base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, u16 smid) | |
1167 | { | |
1168 | struct chain_tracker *chain_req; | |
1169 | unsigned long flags; | |
1170 | ||
1171 | spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); | |
1172 | if (list_empty(&ioc->free_chain_list)) { | |
1173 | spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); | |
1174 | dfailprintk(ioc, pr_warn(MPT3SAS_FMT | |
1175 | "chain buffers not available\n", ioc->name)); | |
1176 | return NULL; | |
1177 | } | |
1178 | chain_req = list_entry(ioc->free_chain_list.next, | |
1179 | struct chain_tracker, tracker_list); | |
1180 | list_del_init(&chain_req->tracker_list); | |
1181 | list_add_tail(&chain_req->tracker_list, | |
1182 | &ioc->scsi_lookup[smid - 1].chain_list); | |
1183 | spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); | |
1184 | return chain_req; | |
1185 | } | |
1186 | ||
1187 | ||
1188 | /** | |
1189 | * _base_build_sg - build generic sg | |
1190 | * @ioc: per adapter object | |
1191 | * @psge: virtual address for SGE | |
1192 | * @data_out_dma: physical address for WRITES | |
1193 | * @data_out_sz: data xfer size for WRITES | |
1194 | * @data_in_dma: physical address for READS | |
1195 | * @data_in_sz: data xfer size for READS | |
1196 | * | |
1197 | * Return nothing. | |
1198 | */ | |
1199 | static void | |
1200 | _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge, | |
1201 | dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma, | |
1202 | size_t data_in_sz) | |
1203 | { | |
1204 | u32 sgl_flags; | |
1205 | ||
1206 | if (!data_out_sz && !data_in_sz) { | |
1207 | _base_build_zero_len_sge(ioc, psge); | |
1208 | return; | |
1209 | } | |
1210 | ||
1211 | if (data_out_sz && data_in_sz) { | |
1212 | /* WRITE sgel first */ | |
1213 | sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | | |
1214 | MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC); | |
1215 | sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; | |
1216 | ioc->base_add_sg_single(psge, sgl_flags | | |
1217 | data_out_sz, data_out_dma); | |
1218 | ||
1219 | /* incr sgel */ | |
1220 | psge += ioc->sge_size; | |
1221 | ||
1222 | /* READ sgel last */ | |
1223 | sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | | |
1224 | MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | | |
1225 | MPI2_SGE_FLAGS_END_OF_LIST); | |
1226 | sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; | |
1227 | ioc->base_add_sg_single(psge, sgl_flags | | |
1228 | data_in_sz, data_in_dma); | |
1229 | } else if (data_out_sz) /* WRITE */ { | |
1230 | sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | | |
1231 | MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | | |
1232 | MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC); | |
1233 | sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; | |
1234 | ioc->base_add_sg_single(psge, sgl_flags | | |
1235 | data_out_sz, data_out_dma); | |
1236 | } else if (data_in_sz) /* READ */ { | |
1237 | sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | | |
1238 | MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | | |
1239 | MPI2_SGE_FLAGS_END_OF_LIST); | |
1240 | sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; | |
1241 | ioc->base_add_sg_single(psge, sgl_flags | | |
1242 | data_in_sz, data_in_dma); | |
1243 | } | |
1244 | } | |
1245 | ||
1246 | /* IEEE format sgls */ | |
1247 | ||
1248 | /** | |
1249 | * _base_add_sg_single_ieee - add sg element for IEEE format | |
1250 | * @paddr: virtual address for SGE | |
1251 | * @flags: SGE flags | |
1252 | * @chain_offset: number of 128 byte elements from start of segment | |
1253 | * @length: data transfer length | |
1254 | * @dma_addr: Physical address | |
1255 | * | |
1256 | * Return nothing. | |
1257 | */ | |
1258 | static void | |
1259 | _base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length, | |
1260 | dma_addr_t dma_addr) | |
1261 | { | |
1262 | Mpi25IeeeSgeChain64_t *sgel = paddr; | |
1263 | ||
1264 | sgel->Flags = flags; | |
1265 | sgel->NextChainOffset = chain_offset; | |
1266 | sgel->Length = cpu_to_le32(length); | |
1267 | sgel->Address = cpu_to_le64(dma_addr); | |
1268 | } | |
1269 | ||
1270 | /** | |
1271 | * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format | |
1272 | * @ioc: per adapter object | |
1273 | * @paddr: virtual address for SGE | |
1274 | * | |
1275 | * Create a zero length scatter gather entry to insure the IOCs hardware has | |
1276 | * something to use if the target device goes brain dead and tries | |
1277 | * to send data even when none is asked for. | |
1278 | * | |
1279 | * Return nothing. | |
1280 | */ | |
1281 | static void | |
1282 | _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr) | |
1283 | { | |
1284 | u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | | |
1285 | MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR | | |
1286 | MPI25_IEEE_SGE_FLAGS_END_OF_LIST); | |
1287 | _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1); | |
1288 | } | |
1289 | ||
1290 | /** | |
1291 | * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format | |
1292 | * @ioc: per adapter object | |
1293 | * @scmd: scsi command | |
1294 | * @smid: system request message index | |
1295 | * Context: none. | |
1296 | * | |
1297 | * The main routine that builds scatter gather table from a given | |
1298 | * scsi request sent via the .queuecommand main handler. | |
1299 | * | |
1300 | * Returns 0 success, anything else error | |
1301 | */ | |
1302 | static int | |
1303 | _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc, | |
1304 | struct scsi_cmnd *scmd, u16 smid) | |
1305 | { | |
1306 | Mpi2SCSIIORequest_t *mpi_request; | |
1307 | dma_addr_t chain_dma; | |
1308 | struct scatterlist *sg_scmd; | |
1309 | void *sg_local, *chain; | |
1310 | u32 chain_offset; | |
1311 | u32 chain_length; | |
1312 | u32 chain_flags; | |
1313 | int sges_left; | |
1314 | u32 sges_in_segment; | |
1315 | u8 simple_sgl_flags; | |
1316 | u8 simple_sgl_flags_last; | |
1317 | u8 chain_sgl_flags; | |
1318 | struct chain_tracker *chain_req; | |
1319 | ||
1320 | mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); | |
1321 | ||
1322 | /* init scatter gather flags */ | |
1323 | simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | | |
1324 | MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; | |
1325 | simple_sgl_flags_last = simple_sgl_flags | | |
1326 | MPI25_IEEE_SGE_FLAGS_END_OF_LIST; | |
1327 | chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | | |
1328 | MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; | |
1329 | ||
1330 | sg_scmd = scsi_sglist(scmd); | |
1331 | sges_left = scsi_dma_map(scmd); | |
1332 | if (!sges_left) { | |
1333 | sdev_printk(KERN_ERR, scmd->device, | |
1334 | "pci_map_sg failed: request for %d bytes!\n", | |
1335 | scsi_bufflen(scmd)); | |
1336 | return -ENOMEM; | |
1337 | } | |
1338 | ||
1339 | sg_local = &mpi_request->SGL; | |
1340 | sges_in_segment = (ioc->request_sz - | |
1341 | offsetof(Mpi2SCSIIORequest_t, SGL))/ioc->sge_size_ieee; | |
1342 | if (sges_left <= sges_in_segment) | |
1343 | goto fill_in_last_segment; | |
1344 | ||
1345 | mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) + | |
1346 | (offsetof(Mpi2SCSIIORequest_t, SGL)/ioc->sge_size_ieee); | |
1347 | ||
1348 | /* fill in main message segment when there is a chain following */ | |
1349 | while (sges_in_segment > 1) { | |
1350 | _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0, | |
1351 | sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); | |
1352 | sg_scmd = sg_next(sg_scmd); | |
1353 | sg_local += ioc->sge_size_ieee; | |
1354 | sges_left--; | |
1355 | sges_in_segment--; | |
1356 | } | |
1357 | ||
1358 | /* initializing the chain flags and pointers */ | |
1359 | chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT; | |
1360 | chain_req = _base_get_chain_buffer_tracker(ioc, smid); | |
1361 | if (!chain_req) | |
1362 | return -1; | |
1363 | chain = chain_req->chain_buffer; | |
1364 | chain_dma = chain_req->chain_buffer_dma; | |
1365 | do { | |
1366 | sges_in_segment = (sges_left <= | |
1367 | ioc->max_sges_in_chain_message) ? sges_left : | |
1368 | ioc->max_sges_in_chain_message; | |
1369 | chain_offset = (sges_left == sges_in_segment) ? | |
1370 | 0 : sges_in_segment; | |
1371 | chain_length = sges_in_segment * ioc->sge_size_ieee; | |
1372 | if (chain_offset) | |
1373 | chain_length += ioc->sge_size_ieee; | |
1374 | _base_add_sg_single_ieee(sg_local, chain_sgl_flags, | |
1375 | chain_offset, chain_length, chain_dma); | |
1376 | ||
1377 | sg_local = chain; | |
1378 | if (!chain_offset) | |
1379 | goto fill_in_last_segment; | |
1380 | ||
1381 | /* fill in chain segments */ | |
1382 | while (sges_in_segment) { | |
1383 | _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0, | |
1384 | sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); | |
1385 | sg_scmd = sg_next(sg_scmd); | |
1386 | sg_local += ioc->sge_size_ieee; | |
1387 | sges_left--; | |
1388 | sges_in_segment--; | |
1389 | } | |
1390 | ||
1391 | chain_req = _base_get_chain_buffer_tracker(ioc, smid); | |
1392 | if (!chain_req) | |
1393 | return -1; | |
1394 | chain = chain_req->chain_buffer; | |
1395 | chain_dma = chain_req->chain_buffer_dma; | |
1396 | } while (1); | |
1397 | ||
1398 | ||
1399 | fill_in_last_segment: | |
1400 | ||
1401 | /* fill the last segment */ | |
1402 | while (sges_left) { | |
1403 | if (sges_left == 1) | |
1404 | _base_add_sg_single_ieee(sg_local, | |
1405 | simple_sgl_flags_last, 0, sg_dma_len(sg_scmd), | |
1406 | sg_dma_address(sg_scmd)); | |
1407 | else | |
1408 | _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0, | |
1409 | sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); | |
1410 | sg_scmd = sg_next(sg_scmd); | |
1411 | sg_local += ioc->sge_size_ieee; | |
1412 | sges_left--; | |
1413 | } | |
1414 | ||
1415 | return 0; | |
1416 | } | |
1417 | ||
1418 | /** | |
1419 | * _base_build_sg_ieee - build generic sg for IEEE format | |
1420 | * @ioc: per adapter object | |
1421 | * @psge: virtual address for SGE | |
1422 | * @data_out_dma: physical address for WRITES | |
1423 | * @data_out_sz: data xfer size for WRITES | |
1424 | * @data_in_dma: physical address for READS | |
1425 | * @data_in_sz: data xfer size for READS | |
1426 | * | |
1427 | * Return nothing. | |
1428 | */ | |
1429 | static void | |
1430 | _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge, | |
1431 | dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma, | |
1432 | size_t data_in_sz) | |
1433 | { | |
1434 | u8 sgl_flags; | |
1435 | ||
1436 | if (!data_out_sz && !data_in_sz) { | |
1437 | _base_build_zero_len_sge_ieee(ioc, psge); | |
1438 | return; | |
1439 | } | |
1440 | ||
1441 | if (data_out_sz && data_in_sz) { | |
1442 | /* WRITE sgel first */ | |
1443 | sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | | |
1444 | MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; | |
1445 | _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz, | |
1446 | data_out_dma); | |
1447 | ||
1448 | /* incr sgel */ | |
1449 | psge += ioc->sge_size_ieee; | |
1450 | ||
1451 | /* READ sgel last */ | |
1452 | sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST; | |
1453 | _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz, | |
1454 | data_in_dma); | |
1455 | } else if (data_out_sz) /* WRITE */ { | |
1456 | sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | | |
1457 | MPI25_IEEE_SGE_FLAGS_END_OF_LIST | | |
1458 | MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; | |
1459 | _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz, | |
1460 | data_out_dma); | |
1461 | } else if (data_in_sz) /* READ */ { | |
1462 | sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | | |
1463 | MPI25_IEEE_SGE_FLAGS_END_OF_LIST | | |
1464 | MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; | |
1465 | _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz, | |
1466 | data_in_dma); | |
1467 | } | |
1468 | } | |
1469 | ||
1470 | #define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10)) | |
1471 | ||
1472 | /** | |
1473 | * _base_config_dma_addressing - set dma addressing | |
1474 | * @ioc: per adapter object | |
1475 | * @pdev: PCI device struct | |
1476 | * | |
1477 | * Returns 0 for success, non-zero for failure. | |
1478 | */ | |
1479 | static int | |
1480 | _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) | |
1481 | { | |
1482 | struct sysinfo s; | |
1483 | char *desc = NULL; | |
1484 | ||
1485 | if (sizeof(dma_addr_t) > 4) { | |
1486 | const uint64_t required_mask = | |
1487 | dma_get_required_mask(&pdev->dev); | |
1488 | if ((required_mask > DMA_BIT_MASK(32)) && | |
1489 | !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && | |
1490 | !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { | |
1491 | ioc->base_add_sg_single = &_base_add_sg_single_64; | |
1492 | ioc->sge_size = sizeof(Mpi2SGESimple64_t); | |
1493 | desc = "64"; | |
1494 | goto out; | |
1495 | } | |
1496 | } | |
1497 | ||
1498 | if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) | |
1499 | && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { | |
1500 | ioc->base_add_sg_single = &_base_add_sg_single_32; | |
1501 | ioc->sge_size = sizeof(Mpi2SGESimple32_t); | |
1502 | desc = "32"; | |
1503 | } else | |
1504 | return -ENODEV; | |
1505 | ||
1506 | out: | |
1507 | si_meminfo(&s); | |
1508 | pr_info(MPT3SAS_FMT | |
1509 | "%s BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n", | |
1510 | ioc->name, desc, convert_to_kb(s.totalram)); | |
1511 | ||
1512 | return 0; | |
1513 | } | |
1514 | ||
1515 | /** | |
1516 | * _base_check_enable_msix - checks MSIX capabable. | |
1517 | * @ioc: per adapter object | |
1518 | * | |
1519 | * Check to see if card is capable of MSIX, and set number | |
1520 | * of available msix vectors | |
1521 | */ | |
1522 | static int | |
1523 | _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc) | |
1524 | { | |
1525 | int base; | |
1526 | u16 message_control; | |
1527 | ||
1528 | base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX); | |
1529 | if (!base) { | |
1530 | dfailprintk(ioc, pr_info(MPT3SAS_FMT "msix not supported\n", | |
1531 | ioc->name)); | |
1532 | return -EINVAL; | |
1533 | } | |
1534 | ||
1535 | /* get msix vector count */ | |
1536 | ||
1537 | pci_read_config_word(ioc->pdev, base + 2, &message_control); | |
1538 | ioc->msix_vector_count = (message_control & 0x3FF) + 1; | |
1539 | if (ioc->msix_vector_count > 8) | |
1540 | ioc->msix_vector_count = 8; | |
1541 | dinitprintk(ioc, pr_info(MPT3SAS_FMT | |
1542 | "msix is supported, vector_count(%d)\n", | |
1543 | ioc->name, ioc->msix_vector_count)); | |
1544 | return 0; | |
1545 | } | |
1546 | ||
1547 | /** | |
1548 | * _base_free_irq - free irq | |
1549 | * @ioc: per adapter object | |
1550 | * | |
1551 | * Freeing respective reply_queue from the list. | |
1552 | */ | |
1553 | static void | |
1554 | _base_free_irq(struct MPT3SAS_ADAPTER *ioc) | |
1555 | { | |
1556 | struct adapter_reply_queue *reply_q, *next; | |
1557 | ||
1558 | if (list_empty(&ioc->reply_queue_list)) | |
1559 | return; | |
1560 | ||
1561 | list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) { | |
1562 | list_del(&reply_q->list); | |
1563 | synchronize_irq(reply_q->vector); | |
1564 | free_irq(reply_q->vector, reply_q); | |
1565 | kfree(reply_q); | |
1566 | } | |
1567 | } | |
1568 | ||
1569 | /** | |
1570 | * _base_request_irq - request irq | |
1571 | * @ioc: per adapter object | |
1572 | * @index: msix index into vector table | |
1573 | * @vector: irq vector | |
1574 | * | |
1575 | * Inserting respective reply_queue into the list. | |
1576 | */ | |
1577 | static int | |
1578 | _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector) | |
1579 | { | |
1580 | struct adapter_reply_queue *reply_q; | |
1581 | int r; | |
1582 | ||
1583 | reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL); | |
1584 | if (!reply_q) { | |
1585 | pr_err(MPT3SAS_FMT "unable to allocate memory %d!\n", | |
1586 | ioc->name, (int)sizeof(struct adapter_reply_queue)); | |
1587 | return -ENOMEM; | |
1588 | } | |
1589 | reply_q->ioc = ioc; | |
1590 | reply_q->msix_index = index; | |
1591 | reply_q->vector = vector; | |
1592 | atomic_set(&reply_q->busy, 0); | |
1593 | if (ioc->msix_enable) | |
1594 | snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d", | |
1595 | MPT3SAS_DRIVER_NAME, ioc->id, index); | |
1596 | else | |
1597 | snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d", | |
1598 | MPT3SAS_DRIVER_NAME, ioc->id); | |
1599 | r = request_irq(vector, _base_interrupt, IRQF_SHARED, reply_q->name, | |
1600 | reply_q); | |
1601 | if (r) { | |
1602 | pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n", | |
1603 | reply_q->name, vector); | |
1604 | kfree(reply_q); | |
1605 | return -EBUSY; | |
1606 | } | |
1607 | ||
1608 | INIT_LIST_HEAD(&reply_q->list); | |
1609 | list_add_tail(&reply_q->list, &ioc->reply_queue_list); | |
1610 | return 0; | |
1611 | } | |
1612 | ||
1613 | /** | |
1614 | * _base_assign_reply_queues - assigning msix index for each cpu | |
1615 | * @ioc: per adapter object | |
1616 | * | |
1617 | * The enduser would need to set the affinity via /proc/irq/#/smp_affinity | |
1618 | * | |
1619 | * It would nice if we could call irq_set_affinity, however it is not | |
1620 | * an exported symbol | |
1621 | */ | |
1622 | static void | |
1623 | _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc) | |
1624 | { | |
1625 | struct adapter_reply_queue *reply_q; | |
1626 | int cpu_id; | |
1627 | int cpu_grouping, loop, grouping, grouping_mod; | |
1628 | int reply_queue; | |
1629 | ||
1630 | if (!_base_is_controller_msix_enabled(ioc)) | |
1631 | return; | |
1632 | ||
1633 | memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz); | |
1634 | ||
1635 | /* NUMA Hardware bug workaround - drop to less reply queues */ | |
1636 | if (ioc->reply_queue_count > ioc->facts.MaxMSIxVectors) { | |
1637 | ioc->reply_queue_count = ioc->facts.MaxMSIxVectors; | |
1638 | reply_queue = 0; | |
1639 | list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { | |
1640 | reply_q->msix_index = reply_queue; | |
1641 | if (++reply_queue == ioc->reply_queue_count) | |
1642 | reply_queue = 0; | |
1643 | } | |
1644 | } | |
1645 | ||
1646 | /* when there are more cpus than available msix vectors, | |
1647 | * then group cpus togeather on same irq | |
1648 | */ | |
1649 | if (ioc->cpu_count > ioc->msix_vector_count) { | |
1650 | grouping = ioc->cpu_count / ioc->msix_vector_count; | |
1651 | grouping_mod = ioc->cpu_count % ioc->msix_vector_count; | |
1652 | if (grouping < 2 || (grouping == 2 && !grouping_mod)) | |
1653 | cpu_grouping = 2; | |
1654 | else if (grouping < 4 || (grouping == 4 && !grouping_mod)) | |
1655 | cpu_grouping = 4; | |
1656 | else if (grouping < 8 || (grouping == 8 && !grouping_mod)) | |
1657 | cpu_grouping = 8; | |
1658 | else | |
1659 | cpu_grouping = 16; | |
1660 | } else | |
1661 | cpu_grouping = 0; | |
1662 | ||
1663 | loop = 0; | |
1664 | reply_q = list_entry(ioc->reply_queue_list.next, | |
1665 | struct adapter_reply_queue, list); | |
1666 | for_each_online_cpu(cpu_id) { | |
1667 | if (!cpu_grouping) { | |
1668 | ioc->cpu_msix_table[cpu_id] = reply_q->msix_index; | |
1669 | reply_q = list_entry(reply_q->list.next, | |
1670 | struct adapter_reply_queue, list); | |
1671 | } else { | |
1672 | if (loop < cpu_grouping) { | |
1673 | ioc->cpu_msix_table[cpu_id] = | |
1674 | reply_q->msix_index; | |
1675 | loop++; | |
1676 | } else { | |
1677 | reply_q = list_entry(reply_q->list.next, | |
1678 | struct adapter_reply_queue, list); | |
1679 | ioc->cpu_msix_table[cpu_id] = | |
1680 | reply_q->msix_index; | |
1681 | loop = 1; | |
1682 | } | |
1683 | } | |
1684 | } | |
1685 | } | |
1686 | ||
1687 | /** | |
1688 | * _base_disable_msix - disables msix | |
1689 | * @ioc: per adapter object | |
1690 | * | |
1691 | */ | |
1692 | static void | |
1693 | _base_disable_msix(struct MPT3SAS_ADAPTER *ioc) | |
1694 | { | |
1695 | if (!ioc->msix_enable) | |
1696 | return; | |
1697 | pci_disable_msix(ioc->pdev); | |
1698 | ioc->msix_enable = 0; | |
1699 | } | |
1700 | ||
1701 | /** | |
1702 | * _base_enable_msix - enables msix, failback to io_apic | |
1703 | * @ioc: per adapter object | |
1704 | * | |
1705 | */ | |
1706 | static int | |
1707 | _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) | |
1708 | { | |
1709 | struct msix_entry *entries, *a; | |
1710 | int r; | |
1711 | int i; | |
1712 | u8 try_msix = 0; | |
1713 | ||
1714 | INIT_LIST_HEAD(&ioc->reply_queue_list); | |
1715 | ||
1716 | if (msix_disable == -1 || msix_disable == 0) | |
1717 | try_msix = 1; | |
1718 | ||
1719 | if (!try_msix) | |
1720 | goto try_ioapic; | |
1721 | ||
1722 | if (_base_check_enable_msix(ioc) != 0) | |
1723 | goto try_ioapic; | |
1724 | ||
1725 | ioc->reply_queue_count = min_t(int, ioc->cpu_count, | |
1726 | ioc->msix_vector_count); | |
1727 | ||
1728 | entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry), | |
1729 | GFP_KERNEL); | |
1730 | if (!entries) { | |
1731 | dfailprintk(ioc, pr_info(MPT3SAS_FMT | |
1732 | "kcalloc failed @ at %s:%d/%s() !!!\n", | |
1733 | ioc->name, __FILE__, __LINE__, __func__)); | |
1734 | goto try_ioapic; | |
1735 | } | |
1736 | ||
1737 | for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) | |
1738 | a->entry = i; | |
1739 | ||
1740 | r = pci_enable_msix(ioc->pdev, entries, ioc->reply_queue_count); | |
1741 | if (r) { | |
1742 | dfailprintk(ioc, pr_info(MPT3SAS_FMT | |
1743 | "pci_enable_msix failed (r=%d) !!!\n", | |
1744 | ioc->name, r)); | |
1745 | kfree(entries); | |
1746 | goto try_ioapic; | |
1747 | } | |
1748 | ||
1749 | ioc->msix_enable = 1; | |
1750 | for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) { | |
1751 | r = _base_request_irq(ioc, i, a->vector); | |
1752 | if (r) { | |
1753 | _base_free_irq(ioc); | |
1754 | _base_disable_msix(ioc); | |
1755 | kfree(entries); | |
1756 | goto try_ioapic; | |
1757 | } | |
1758 | } | |
1759 | ||
1760 | kfree(entries); | |
1761 | return 0; | |
1762 | ||
1763 | /* failback to io_apic interrupt routing */ | |
1764 | try_ioapic: | |
1765 | ||
1766 | r = _base_request_irq(ioc, 0, ioc->pdev->irq); | |
1767 | ||
1768 | return r; | |
1769 | } | |
1770 | ||
/**
 * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
 * @ioc: per adapter object
 *
 * Enables the PCI device, claims its memory BARs, configures DMA
 * addressing, ioremaps the first memory BAR (chip registers), records
 * the first IO BAR, and sets up interrupts (MSI-X with io_apic
 * fallback).  On any failure everything acquired so far is released in
 * reverse order via the out_fail path.
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
{
	struct pci_dev *pdev = ioc->pdev;
	u32 memap_sz;
	u32 pio_sz;
	int i, r = 0;
	u64 pio_chip = 0;
	u64 chip_phys = 0;
	struct adapter_reply_queue *reply_q;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n",
	    ioc->name, __func__));

	ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
	if (pci_enable_device_mem(pdev)) {
		pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n",
			ioc->name);
		return -ENODEV;
	}


	if (pci_request_selected_regions(pdev, ioc->bars,
	    MPT3SAS_DRIVER_NAME)) {
		pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n",
			ioc->name);
		r = -ENODEV;
		goto out_fail;
	}

	/* AER (Advanced Error Reporting) hooks */
	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);


	if (_base_config_dma_addressing(ioc, pdev) != 0) {
		pr_warn(MPT3SAS_FMT "no suitable DMA mask for %s\n",
		    ioc->name, pci_name(pdev));
		r = -ENODEV;
		goto out_fail;
	}

	/* only the first IO BAR and the first memory BAR are used */
	for (i = 0, memap_sz = 0, pio_sz = 0 ; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
			if (pio_sz)
				continue;
			pio_chip = (u64)pci_resource_start(pdev, i);
			pio_sz = pci_resource_len(pdev, i);
		} else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			if (memap_sz)
				continue;
			ioc->chip_phys = pci_resource_start(pdev, i);
			chip_phys = (u64)ioc->chip_phys;
			memap_sz = pci_resource_len(pdev, i);
			ioc->chip = ioremap(ioc->chip_phys, memap_sz);
			if (ioc->chip == NULL) {
				pr_err(MPT3SAS_FMT "unable to map adapter memory!\n",
					ioc->name);
				r = -EINVAL;
				goto out_fail;
			}
		}
	}

	/* keep interrupts masked until the reply queues are wired up */
	_base_mask_interrupts(ioc);
	r = _base_enable_msix(ioc);
	if (r)
		goto out_fail;

	list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
		pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
		    reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
		    "IO-APIC enabled"), reply_q->vector);

	pr_info(MPT3SAS_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
	    ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
	pr_info(MPT3SAS_FMT "ioport(0x%016llx), size(%d)\n",
	    ioc->name, (unsigned long long)pio_chip, pio_sz);

	/* Save PCI configuration state for recovery from PCI AER/EEH errors */
	pci_save_state(pdev);
	return 0;

 out_fail:
	/* chip_phys non-zero implies the ioremap above succeeded */
	if (ioc->chip_phys)
		iounmap(ioc->chip);
	ioc->chip_phys = 0;
	pci_release_selected_regions(ioc->pdev, ioc->bars);
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
	return r;
}
1870 | ||
1871 | /** | |
1872 | * mpt3sas_base_get_msg_frame - obtain request mf pointer | |
1873 | * @ioc: per adapter object | |
1874 | * @smid: system request message index(smid zero is invalid) | |
1875 | * | |
1876 | * Returns virt pointer to message frame. | |
1877 | */ | |
1878 | void * | |
1879 | mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid) | |
1880 | { | |
1881 | return (void *)(ioc->request + (smid * ioc->request_sz)); | |
1882 | } | |
1883 | ||
1884 | /** | |
1885 | * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr | |
1886 | * @ioc: per adapter object | |
1887 | * @smid: system request message index | |
1888 | * | |
1889 | * Returns virt pointer to sense buffer. | |
1890 | */ | |
1891 | void * | |
1892 | mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid) | |
1893 | { | |
1894 | return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE)); | |
1895 | } | |
1896 | ||
1897 | /** | |
1898 | * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr | |
1899 | * @ioc: per adapter object | |
1900 | * @smid: system request message index | |
1901 | * | |
1902 | * Returns phys pointer to the low 32bit address of the sense buffer. | |
1903 | */ | |
1904 | __le32 | |
1905 | mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid) | |
1906 | { | |
1907 | return cpu_to_le32(ioc->sense_dma + ((smid - 1) * | |
1908 | SCSI_SENSE_BUFFERSIZE)); | |
1909 | } | |
1910 | ||
1911 | /** | |
1912 | * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address | |
1913 | * @ioc: per adapter object | |
1914 | * @phys_addr: lower 32 physical addr of the reply | |
1915 | * | |
1916 | * Converts 32bit lower physical addr into a virt address. | |
1917 | */ | |
1918 | void * | |
1919 | mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr) | |
1920 | { | |
1921 | if (!phys_addr) | |
1922 | return NULL; | |
1923 | return ioc->reply + (phys_addr - (u32)ioc->reply_dma); | |
1924 | } | |
1925 | ||
1926 | /** | |
1927 | * mpt3sas_base_get_smid - obtain a free smid from internal queue | |
1928 | * @ioc: per adapter object | |
1929 | * @cb_idx: callback index | |
1930 | * | |
1931 | * Returns smid (zero is invalid) | |
1932 | */ | |
1933 | u16 | |
1934 | mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx) | |
1935 | { | |
1936 | unsigned long flags; | |
1937 | struct request_tracker *request; | |
1938 | u16 smid; | |
1939 | ||
1940 | spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); | |
1941 | if (list_empty(&ioc->internal_free_list)) { | |
1942 | spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); | |
1943 | pr_err(MPT3SAS_FMT "%s: smid not available\n", | |
1944 | ioc->name, __func__); | |
1945 | return 0; | |
1946 | } | |
1947 | ||
1948 | request = list_entry(ioc->internal_free_list.next, | |
1949 | struct request_tracker, tracker_list); | |
1950 | request->cb_idx = cb_idx; | |
1951 | smid = request->smid; | |
1952 | list_del(&request->tracker_list); | |
1953 | spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); | |
1954 | return smid; | |
1955 | } | |
1956 | ||
1957 | /** | |
1958 | * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue | |
1959 | * @ioc: per adapter object | |
1960 | * @cb_idx: callback index | |
1961 | * @scmd: pointer to scsi command object | |
1962 | * | |
1963 | * Returns smid (zero is invalid) | |
1964 | */ | |
1965 | u16 | |
1966 | mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx, | |
1967 | struct scsi_cmnd *scmd) | |
1968 | { | |
1969 | unsigned long flags; | |
1970 | struct scsiio_tracker *request; | |
1971 | u16 smid; | |
1972 | ||
1973 | spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); | |
1974 | if (list_empty(&ioc->free_list)) { | |
1975 | spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); | |
1976 | pr_err(MPT3SAS_FMT "%s: smid not available\n", | |
1977 | ioc->name, __func__); | |
1978 | return 0; | |
1979 | } | |
1980 | ||
1981 | request = list_entry(ioc->free_list.next, | |
1982 | struct scsiio_tracker, tracker_list); | |
1983 | request->scmd = scmd; | |
1984 | request->cb_idx = cb_idx; | |
1985 | smid = request->smid; | |
1986 | list_del(&request->tracker_list); | |
1987 | spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); | |
1988 | return smid; | |
1989 | } | |
1990 | ||
1991 | /** | |
1992 | * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue | |
1993 | * @ioc: per adapter object | |
1994 | * @cb_idx: callback index | |
1995 | * | |
1996 | * Returns smid (zero is invalid) | |
1997 | */ | |
1998 | u16 | |
1999 | mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx) | |
2000 | { | |
2001 | unsigned long flags; | |
2002 | struct request_tracker *request; | |
2003 | u16 smid; | |
2004 | ||
2005 | spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); | |
2006 | if (list_empty(&ioc->hpr_free_list)) { | |
2007 | spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); | |
2008 | return 0; | |
2009 | } | |
2010 | ||
2011 | request = list_entry(ioc->hpr_free_list.next, | |
2012 | struct request_tracker, tracker_list); | |
2013 | request->cb_idx = cb_idx; | |
2014 | smid = request->smid; | |
2015 | list_del(&request->tracker_list); | |
2016 | spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); | |
2017 | return smid; | |
2018 | } | |
2019 | ||
/**
 * mpt3sas_base_free_smid - put smid back on free_list
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Routes @smid back to the free list it was drawn from — scsiio,
 * hi-priority, or internal — based on where it falls in the smid
 * ranges.  For scsiio requests this also returns any chain buffers
 * held by the request and, during host recovery, wakes the reset
 * waiter when the last pending IO drains.
 *
 * Return nothing.
 */
void
mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	unsigned long flags;
	int i;
	struct chain_tracker *chain_req, *next;

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	if (smid < ioc->hi_priority_smid) {
		/* scsiio queue */
		i = smid - 1;
		if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
			/* give back every chain buffer owned by this request */
			list_for_each_entry_safe(chain_req, next,
			    &ioc->scsi_lookup[i].chain_list, tracker_list) {
				list_del_init(&chain_req->tracker_list);
				list_add(&chain_req->tracker_list,
				    &ioc->free_chain_list);
			}
		}
		ioc->scsi_lookup[i].cb_idx = 0xFF;
		ioc->scsi_lookup[i].scmd = NULL;
		list_add(&ioc->scsi_lookup[i].tracker_list, &ioc->free_list);
		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

		/*
		 * See _wait_for_commands_to_complete() call with regards
		 * to this code.
		 */
		if (ioc->shost_recovery && ioc->pending_io_count) {
			/* last outstanding IO — release the reset waiter */
			if (ioc->pending_io_count == 1)
				wake_up(&ioc->reset_wq);
			ioc->pending_io_count--;
		}
		return;
	} else if (smid < ioc->internal_smid) {
		/* hi-priority */
		i = smid - ioc->hi_priority_smid;
		ioc->hpr_lookup[i].cb_idx = 0xFF;
		list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
	} else if (smid <= ioc->hba_queue_depth) {
		/* internal queue */
		i = smid - ioc->internal_smid;
		ioc->internal_lookup[i].cb_idx = 0xFF;
		list_add(&ioc->internal_lookup[i].tracker_list,
		    &ioc->internal_free_list);
	}
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
}
2075 | ||
/**
 * _base_writeq - 64 bit write to MMIO
 * @ioc: per adapter object
 * @b: data payload
 * @addr: address in MMIO space
 * @writeq_lock: spin lock
 *
 * Glue for handling an atomic 64 bit word to MMIO. This special handling
 * takes care of 32 bit environments where it's not guaranteed to send the
 * entire word in one transfer.
 */
#if defined(writeq) && defined(CONFIG_64BIT)
static inline void
_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
{
	/* native 64-bit MMIO write; @writeq_lock is unused on this path */
	writeq(cpu_to_le64(b), addr);
}
#else
static inline void
_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
{
	unsigned long flags;
	__u64 data_out = cpu_to_le64(b);

	/*
	 * Two 32-bit writes, low word first; the lock prevents another CPU
	 * from interleaving the halves of its own descriptor with ours.
	 */
	spin_lock_irqsave(writeq_lock, flags);
	writel((u32)(data_out), addr);
	writel((u32)(data_out >> 32), (addr + 4));
	spin_unlock_irqrestore(writeq_lock, flags);
}
#endif
2106 | ||
/**
 * _base_get_msix_index - msix index for the current CPU
 * @ioc: per adapter object
 *
 * Indexes cpu_msix_table by raw_smp_processor_id(); a stale CPU id after
 * migration only selects a different table entry, so the raw variant is
 * sufficient here.
 */
static inline u8
_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
{
	return ioc->cpu_msix_table[raw_smp_processor_id()];
}
2112 | ||
2113 | /** | |
2114 | * mpt3sas_base_put_smid_scsi_io - send SCSI_IO request to firmware | |
2115 | * @ioc: per adapter object | |
2116 | * @smid: system request message index | |
2117 | * @handle: device handle | |
2118 | * | |
2119 | * Return nothing. | |
2120 | */ | |
2121 | void | |
2122 | mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle) | |
2123 | { | |
2124 | Mpi2RequestDescriptorUnion_t descriptor; | |
2125 | u64 *request = (u64 *)&descriptor; | |
2126 | ||
2127 | ||
2128 | descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; | |
2129 | descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc); | |
2130 | descriptor.SCSIIO.SMID = cpu_to_le16(smid); | |
2131 | descriptor.SCSIIO.DevHandle = cpu_to_le16(handle); | |
2132 | descriptor.SCSIIO.LMID = 0; | |
2133 | _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, | |
2134 | &ioc->scsi_lookup_lock); | |
2135 | } | |
2136 | ||
2137 | /** | |
2138 | * mpt3sas_base_put_smid_fast_path - send fast path request to firmware | |
2139 | * @ioc: per adapter object | |
2140 | * @smid: system request message index | |
2141 | * @handle: device handle | |
2142 | * | |
2143 | * Return nothing. | |
2144 | */ | |
2145 | void | |
2146 | mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid, | |
2147 | u16 handle) | |
2148 | { | |
2149 | Mpi2RequestDescriptorUnion_t descriptor; | |
2150 | u64 *request = (u64 *)&descriptor; | |
2151 | ||
2152 | descriptor.SCSIIO.RequestFlags = | |
2153 | MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO; | |
2154 | descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc); | |
2155 | descriptor.SCSIIO.SMID = cpu_to_le16(smid); | |
2156 | descriptor.SCSIIO.DevHandle = cpu_to_le16(handle); | |
2157 | descriptor.SCSIIO.LMID = 0; | |
2158 | _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, | |
2159 | &ioc->scsi_lookup_lock); | |
2160 | } | |
2161 | ||
2162 | /** | |
2163 | * mpt3sas_base_put_smid_hi_priority - send Task Managment request to firmware | |
2164 | * @ioc: per adapter object | |
2165 | * @smid: system request message index | |
2166 | * | |
2167 | * Return nothing. | |
2168 | */ | |
2169 | void | |
2170 | mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid) | |
2171 | { | |
2172 | Mpi2RequestDescriptorUnion_t descriptor; | |
2173 | u64 *request = (u64 *)&descriptor; | |
2174 | ||
2175 | descriptor.HighPriority.RequestFlags = | |
2176 | MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; | |
2177 | descriptor.HighPriority.MSIxIndex = 0; | |
2178 | descriptor.HighPriority.SMID = cpu_to_le16(smid); | |
2179 | descriptor.HighPriority.LMID = 0; | |
2180 | descriptor.HighPriority.Reserved1 = 0; | |
2181 | _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, | |
2182 | &ioc->scsi_lookup_lock); | |
2183 | } | |
2184 | ||
2185 | /** | |
2186 | * mpt3sas_base_put_smid_default - Default, primarily used for config pages | |
2187 | * @ioc: per adapter object | |
2188 | * @smid: system request message index | |
2189 | * | |
2190 | * Return nothing. | |
2191 | */ | |
2192 | void | |
2193 | mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid) | |
2194 | { | |
2195 | Mpi2RequestDescriptorUnion_t descriptor; | |
2196 | u64 *request = (u64 *)&descriptor; | |
2197 | ||
2198 | descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; | |
2199 | descriptor.Default.MSIxIndex = _base_get_msix_index(ioc); | |
2200 | descriptor.Default.SMID = cpu_to_le16(smid); | |
2201 | descriptor.Default.LMID = 0; | |
2202 | descriptor.Default.DescriptorTypeDependent = 0; | |
2203 | _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, | |
2204 | &ioc->scsi_lookup_lock); | |
2205 | } | |
2206 | ||
2207 | ||
2208 | ||
2209 | /** | |
2210 | * _base_display_ioc_capabilities - Disply IOC's capabilities. | |
2211 | * @ioc: per adapter object | |
2212 | * | |
2213 | * Return nothing. | |
2214 | */ | |
2215 | static void | |
2216 | _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc) | |
2217 | { | |
2218 | int i = 0; | |
2219 | char desc[16]; | |
2220 | u32 iounit_pg1_flags; | |
2221 | u32 bios_version; | |
2222 | ||
2223 | bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion); | |
2224 | strncpy(desc, ioc->manu_pg0.ChipName, 16); | |
2225 | pr_info(MPT3SAS_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "\ | |
2226 | "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n", | |
2227 | ioc->name, desc, | |
2228 | (ioc->facts.FWVersion.Word & 0xFF000000) >> 24, | |
2229 | (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16, | |
2230 | (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8, | |
2231 | ioc->facts.FWVersion.Word & 0x000000FF, | |
2232 | ioc->pdev->revision, | |
2233 | (bios_version & 0xFF000000) >> 24, | |
2234 | (bios_version & 0x00FF0000) >> 16, | |
2235 | (bios_version & 0x0000FF00) >> 8, | |
2236 | bios_version & 0x000000FF); | |
2237 | ||
2238 | pr_info(MPT3SAS_FMT "Protocol=(", ioc->name); | |
2239 | ||
2240 | if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) { | |
2241 | pr_info("Initiator"); | |
2242 | i++; | |
2243 | } | |
2244 | ||
2245 | if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) { | |
2246 | pr_info("%sTarget", i ? "," : ""); | |
2247 | i++; | |
2248 | } | |
2249 | ||
2250 | i = 0; | |
2251 | pr_info("), "); | |
2252 | pr_info("Capabilities=("); | |
2253 | ||
2254 | if (ioc->facts.IOCCapabilities & | |
2255 | MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) { | |
2256 | pr_info("Raid"); | |
2257 | i++; | |
2258 | } | |
2259 | ||
2260 | if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) { | |
2261 | pr_info("%sTLR", i ? "," : ""); | |
2262 | i++; | |
2263 | } | |
2264 | ||
2265 | if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) { | |
2266 | pr_info("%sMulticast", i ? "," : ""); | |
2267 | i++; | |
2268 | } | |
2269 | ||
2270 | if (ioc->facts.IOCCapabilities & | |
2271 | MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) { | |
2272 | pr_info("%sBIDI Target", i ? "," : ""); | |
2273 | i++; | |
2274 | } | |
2275 | ||
2276 | if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) { | |
2277 | pr_info("%sEEDP", i ? "," : ""); | |
2278 | i++; | |
2279 | } | |
2280 | ||
2281 | if (ioc->facts.IOCCapabilities & | |
2282 | MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) { | |
2283 | pr_info("%sSnapshot Buffer", i ? "," : ""); | |
2284 | i++; | |
2285 | } | |
2286 | ||
2287 | if (ioc->facts.IOCCapabilities & | |
2288 | MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) { | |
2289 | pr_info("%sDiag Trace Buffer", i ? "," : ""); | |
2290 | i++; | |
2291 | } | |
2292 | ||
2293 | if (ioc->facts.IOCCapabilities & | |
2294 | MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) { | |
2295 | pr_info("%sDiag Extended Buffer", i ? "," : ""); | |
2296 | i++; | |
2297 | } | |
2298 | ||
2299 | if (ioc->facts.IOCCapabilities & | |
2300 | MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) { | |
2301 | pr_info("%sTask Set Full", i ? "," : ""); | |
2302 | i++; | |
2303 | } | |
2304 | ||
2305 | iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags); | |
2306 | if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) { | |
2307 | pr_info("%sNCQ", i ? "," : ""); | |
2308 | i++; | |
2309 | } | |
2310 | ||
2311 | pr_info(")\n"); | |
2312 | } | |
2313 | ||
2314 | /** | |
2315 | * mpt3sas_base_update_missing_delay - change the missing delay timers | |
2316 | * @ioc: per adapter object | |
2317 | * @device_missing_delay: amount of time till device is reported missing | |
2318 | * @io_missing_delay: interval IO is returned when there is a missing device | |
2319 | * | |
2320 | * Return nothing. | |
2321 | * | |
2322 | * Passed on the command line, this function will modify the device missing | |
2323 | * delay, as well as the io missing delay. This should be called at driver | |
2324 | * load time. | |
2325 | */ | |
2326 | void | |
2327 | mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc, | |
2328 | u16 device_missing_delay, u8 io_missing_delay) | |
2329 | { | |
2330 | u16 dmd, dmd_new, dmd_orignal; | |
2331 | u8 io_missing_delay_original; | |
2332 | u16 sz; | |
2333 | Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; | |
2334 | Mpi2ConfigReply_t mpi_reply; | |
2335 | u8 num_phys = 0; | |
2336 | u16 ioc_status; | |
2337 | ||
2338 | mpt3sas_config_get_number_hba_phys(ioc, &num_phys); | |
2339 | if (!num_phys) | |
2340 | return; | |
2341 | ||
2342 | sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys * | |
2343 | sizeof(Mpi2SasIOUnit1PhyData_t)); | |
2344 | sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); | |
2345 | if (!sas_iounit_pg1) { | |
2346 | pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", | |
2347 | ioc->name, __FILE__, __LINE__, __func__); | |
2348 | goto out; | |
2349 | } | |
2350 | if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, | |
2351 | sas_iounit_pg1, sz))) { | |
2352 | pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", | |
2353 | ioc->name, __FILE__, __LINE__, __func__); | |
2354 | goto out; | |
2355 | } | |
2356 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & | |
2357 | MPI2_IOCSTATUS_MASK; | |
2358 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { | |
2359 | pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", | |
2360 | ioc->name, __FILE__, __LINE__, __func__); | |
2361 | goto out; | |
2362 | } | |
2363 | ||
2364 | /* device missing delay */ | |
2365 | dmd = sas_iounit_pg1->ReportDeviceMissingDelay; | |
2366 | if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16) | |
2367 | dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16; | |
2368 | else | |
2369 | dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK; | |
2370 | dmd_orignal = dmd; | |
2371 | if (device_missing_delay > 0x7F) { | |
2372 | dmd = (device_missing_delay > 0x7F0) ? 0x7F0 : | |
2373 | device_missing_delay; | |
2374 | dmd = dmd / 16; | |
2375 | dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16; | |
2376 | } else | |
2377 | dmd = device_missing_delay; | |
2378 | sas_iounit_pg1->ReportDeviceMissingDelay = dmd; | |
2379 | ||
2380 | /* io missing delay */ | |
2381 | io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay; | |
2382 | sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay; | |
2383 | ||
2384 | if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, | |
2385 | sz)) { | |
2386 | if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16) | |
2387 | dmd_new = (dmd & | |
2388 | MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16; | |
2389 | else | |
2390 | dmd_new = | |
2391 | dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK; | |
2392 | pr_info(MPT3SAS_FMT "device_missing_delay: old(%d), new(%d)\n", | |
2393 | ioc->name, dmd_orignal, dmd_new); | |
2394 | pr_info(MPT3SAS_FMT "ioc_missing_delay: old(%d), new(%d)\n", | |
2395 | ioc->name, io_missing_delay_original, | |
2396 | io_missing_delay); | |
2397 | ioc->device_missing_delay = dmd_new; | |
2398 | ioc->io_missing_delay = io_missing_delay; | |
2399 | } | |
2400 | ||
2401 | out: | |
2402 | kfree(sas_iounit_pg1); | |
2403 | } | |
/**
 * _base_static_config_pages - static start of day config pages
 * @ioc: per adapter object
 *
 * Reads the manufacturing, BIOS, IOC and IO unit config pages into the
 * per-adapter cache, forces a sane EEDPTagMode, displays capabilities,
 * and writes back io unit page 1 with task-set-full handling set to
 * match the facts capabilities.
 *
 * Return nothing.
 */
static void
_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2ConfigReply_t mpi_reply;
	u32 iounit_pg1_flags;

	mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
	/* manufacturing page 10 only exists on IR (RAID) firmware */
	if (ioc->ir_firmware)
		mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
		    &ioc->manu_pg10);

	/*
	 * Ensure correct T10 PI operation if vendor left EEDPTagMode
	 * flag unset in NVDATA.
	 */
	mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
	if (ioc->manu_pg11.EEDPTagMode == 0) {
		pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
		    ioc->name);
		ioc->manu_pg11.EEDPTagMode &= ~0x3;
		ioc->manu_pg11.EEDPTagMode |= 0x1;
		mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
		    &ioc->manu_pg11);
	}

	mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
	mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
	mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
	mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
	mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
	/* logs versions/capabilities using the pages read above */
	_base_display_ioc_capabilities(ioc);

	/*
	 * Enable task_set_full handling in iounit_pg1 when the
	 * facts capabilities indicate that its supported.
	 */
	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
	if ((ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
		iounit_pg1_flags &=
		    ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
	else
		iounit_pg1_flags |=
		    MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
	ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
	mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
}
2457 | ||
/**
 * _base_release_memory_pools - release memory
 * @ioc: per adapter object
 *
 * Free memory allocated from _base_allocate_memory_pools.
 *
 * Each pool is freed only if its virtual address is non-NULL, so this
 * is safe to call after a partially failed allocation; pointers are
 * cleared after freeing to prevent double-free on a second call.
 *
 * Return nothing.
 */
static void
_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
{
	int i;

	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	if (ioc->request) {
		pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
		    ioc->request, ioc->request_dma);
		dexitprintk(ioc, pr_info(MPT3SAS_FMT
			"request_pool(0x%p): free\n",
			ioc->name, ioc->request));
		ioc->request = NULL;
	}

	if (ioc->sense) {
		pci_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
		if (ioc->sense_dma_pool)
			pci_pool_destroy(ioc->sense_dma_pool);
		dexitprintk(ioc, pr_info(MPT3SAS_FMT
			"sense_pool(0x%p): free\n",
			ioc->name, ioc->sense));
		ioc->sense = NULL;
	}

	if (ioc->reply) {
		pci_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
		if (ioc->reply_dma_pool)
			pci_pool_destroy(ioc->reply_dma_pool);
		dexitprintk(ioc, pr_info(MPT3SAS_FMT
			"reply_pool(0x%p): free\n",
			ioc->name, ioc->reply));
		ioc->reply = NULL;
	}

	if (ioc->reply_free) {
		pci_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
		    ioc->reply_free_dma);
		if (ioc->reply_free_dma_pool)
			pci_pool_destroy(ioc->reply_free_dma_pool);
		dexitprintk(ioc, pr_info(MPT3SAS_FMT
			"reply_free_pool(0x%p): free\n",
			ioc->name, ioc->reply_free));
		ioc->reply_free = NULL;
	}

	if (ioc->reply_post_free) {
		pci_pool_free(ioc->reply_post_free_dma_pool,
		    ioc->reply_post_free, ioc->reply_post_free_dma);
		if (ioc->reply_post_free_dma_pool)
			pci_pool_destroy(ioc->reply_post_free_dma_pool);
		dexitprintk(ioc, pr_info(MPT3SAS_FMT
			"reply_post_free_pool(0x%p): free\n", ioc->name,
			ioc->reply_post_free));
		ioc->reply_post_free = NULL;
	}

	if (ioc->config_page) {
		dexitprintk(ioc, pr_info(MPT3SAS_FMT
			"config_page(0x%p): free\n", ioc->name,
			ioc->config_page));
		pci_free_consistent(ioc->pdev, ioc->config_page_sz,
		    ioc->config_page, ioc->config_page_dma);
	}

	if (ioc->scsi_lookup) {
		/* scsi_lookup was allocated with __get_free_pages */
		free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
		ioc->scsi_lookup = NULL;
	}
	kfree(ioc->hpr_lookup);
	kfree(ioc->internal_lookup);
	if (ioc->chain_lookup) {
		/* each chain buffer must be returned before the pool dies */
		for (i = 0; i < ioc->chain_depth; i++) {
			if (ioc->chain_lookup[i].chain_buffer)
				pci_pool_free(ioc->chain_dma_pool,
				    ioc->chain_lookup[i].chain_buffer,
				    ioc->chain_lookup[i].chain_buffer_dma);
		}
		if (ioc->chain_dma_pool)
			pci_pool_destroy(ioc->chain_dma_pool);
		free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
		ioc->chain_lookup = NULL;
	}
}
2552 | ||
2553 | /** | |
2554 | * _base_allocate_memory_pools - allocate start of day memory pools | |
2555 | * @ioc: per adapter object | |
2556 | * @sleep_flag: CAN_SLEEP or NO_SLEEP | |
2557 | * | |
2558 | * Returns 0 success, anything else error | |
2559 | */ | |
2560 | static int | |
2561 | _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) | |
2562 | { | |
2563 | struct mpt3sas_facts *facts; | |
2564 | u16 max_sge_elements; | |
2565 | u16 chains_needed_per_io; | |
2566 | u32 sz, total_sz, reply_post_free_sz; | |
2567 | u32 retry_sz; | |
2568 | u16 max_request_credit; | |
2569 | unsigned short sg_tablesize; | |
2570 | u16 sge_size; | |
2571 | int i; | |
2572 | ||
2573 | dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, | |
2574 | __func__)); | |
2575 | ||
2576 | ||
2577 | retry_sz = 0; | |
2578 | facts = &ioc->facts; | |
2579 | ||
2580 | /* command line tunables for max sgl entries */ | |
2581 | if (max_sgl_entries != -1) | |
2582 | sg_tablesize = max_sgl_entries; | |
2583 | else | |
2584 | sg_tablesize = MPT3SAS_SG_DEPTH; | |
2585 | ||
2586 | if (sg_tablesize < MPT3SAS_MIN_PHYS_SEGMENTS) | |
2587 | sg_tablesize = MPT3SAS_MIN_PHYS_SEGMENTS; | |
2588 | else if (sg_tablesize > MPT3SAS_MAX_PHYS_SEGMENTS) | |
2589 | sg_tablesize = MPT3SAS_MAX_PHYS_SEGMENTS; | |
2590 | ioc->shost->sg_tablesize = sg_tablesize; | |
2591 | ||
2592 | ioc->hi_priority_depth = facts->HighPriorityCredit; | |
2593 | ioc->internal_depth = ioc->hi_priority_depth + (5); | |
2594 | /* command line tunables for max controller queue depth */ | |
2595 | if (max_queue_depth != -1 && max_queue_depth != 0) { | |
2596 | max_request_credit = min_t(u16, max_queue_depth + | |
2597 | ioc->hi_priority_depth + ioc->internal_depth, | |
2598 | facts->RequestCredit); | |
2599 | if (max_request_credit > MAX_HBA_QUEUE_DEPTH) | |
2600 | max_request_credit = MAX_HBA_QUEUE_DEPTH; | |
2601 | } else | |
2602 | max_request_credit = min_t(u16, facts->RequestCredit, | |
2603 | MAX_HBA_QUEUE_DEPTH); | |
2604 | ||
2605 | ioc->hba_queue_depth = max_request_credit; | |
2606 | ||
2607 | /* request frame size */ | |
2608 | ioc->request_sz = facts->IOCRequestFrameSize * 4; | |
2609 | ||
2610 | /* reply frame size */ | |
2611 | ioc->reply_sz = facts->ReplyFrameSize * 4; | |
2612 | ||
2613 | /* calculate the max scatter element size */ | |
2614 | sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee); | |
2615 | ||
2616 | retry_allocation: | |
2617 | total_sz = 0; | |
2618 | /* calculate number of sg elements left over in the 1st frame */ | |
2619 | max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) - | |
2620 | sizeof(Mpi2SGEIOUnion_t)) + sge_size); | |
2621 | ioc->max_sges_in_main_message = max_sge_elements/sge_size; | |
2622 | ||
2623 | /* now do the same for a chain buffer */ | |
2624 | max_sge_elements = ioc->request_sz - sge_size; | |
2625 | ioc->max_sges_in_chain_message = max_sge_elements/sge_size; | |
2626 | ||
2627 | /* | |
2628 | * MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE | |
2629 | */ | |
2630 | chains_needed_per_io = ((ioc->shost->sg_tablesize - | |
2631 | ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message) | |
2632 | + 1; | |
2633 | if (chains_needed_per_io > facts->MaxChainDepth) { | |
2634 | chains_needed_per_io = facts->MaxChainDepth; | |
2635 | ioc->shost->sg_tablesize = min_t(u16, | |
2636 | ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message | |
2637 | * chains_needed_per_io), ioc->shost->sg_tablesize); | |
2638 | } | |
2639 | ioc->chains_needed_per_io = chains_needed_per_io; | |
2640 | ||
2641 | /* reply free queue sizing - taking into account for 64 FW events */ | |
2642 | ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64; | |
2643 | ||
2644 | /* calculate reply descriptor post queue depth */ | |
2645 | ioc->reply_post_queue_depth = ioc->hba_queue_depth + | |
2646 | ioc->reply_free_queue_depth + 1 ; | |
2647 | /* align the reply post queue on the next 16 count boundary */ | |
2648 | if (ioc->reply_post_queue_depth % 16) | |
2649 | ioc->reply_post_queue_depth += 16 - | |
2650 | (ioc->reply_post_queue_depth % 16); | |
2651 | ||
2652 | ||
2653 | if (ioc->reply_post_queue_depth > | |
2654 | facts->MaxReplyDescriptorPostQueueDepth) { | |
2655 | ioc->reply_post_queue_depth = | |
2656 | facts->MaxReplyDescriptorPostQueueDepth - | |
2657 | (facts->MaxReplyDescriptorPostQueueDepth % 16); | |
2658 | ioc->hba_queue_depth = | |
2659 | ((ioc->reply_post_queue_depth - 64) / 2) - 1; | |
2660 | ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64; | |
2661 | } | |
2662 | ||
2663 | dinitprintk(ioc, pr_info(MPT3SAS_FMT "scatter gather: " \ | |
2664 | "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), " | |
2665 | "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message, | |
2666 | ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize, | |
2667 | ioc->chains_needed_per_io)); | |
2668 | ||
2669 | ioc->scsiio_depth = ioc->hba_queue_depth - | |
2670 | ioc->hi_priority_depth - ioc->internal_depth; | |
2671 | ||
2672 | /* set the scsi host can_queue depth | |
2673 | * with some internal commands that could be outstanding | |
2674 | */ | |
2675 | ioc->shost->can_queue = ioc->scsiio_depth; | |
2676 | dinitprintk(ioc, pr_info(MPT3SAS_FMT | |
2677 | "scsi host: can_queue depth (%d)\n", | |
2678 | ioc->name, ioc->shost->can_queue)); | |
2679 | ||
2680 | ||
2681 | /* contiguous pool for request and chains, 16 byte align, one extra " | |
2682 | * "frame for smid=0 | |
2683 | */ | |
2684 | ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth; | |
2685 | sz = ((ioc->scsiio_depth + 1) * ioc->request_sz); | |
2686 | ||
2687 | /* hi-priority queue */ | |
2688 | sz += (ioc->hi_priority_depth * ioc->request_sz); | |
2689 | ||
2690 | /* internal queue */ | |
2691 | sz += (ioc->internal_depth * ioc->request_sz); | |
2692 | ||
2693 | ioc->request_dma_sz = sz; | |
2694 | ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma); | |
2695 | if (!ioc->request) { | |
2696 | pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \ | |
2697 | "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), " | |
2698 | "total(%d kB)\n", ioc->name, ioc->hba_queue_depth, | |
2699 | ioc->chains_needed_per_io, ioc->request_sz, sz/1024); | |
2700 | if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH) | |
2701 | goto out; | |
2702 | retry_sz += 64; | |
2703 | ioc->hba_queue_depth = max_request_credit - retry_sz; | |
2704 | goto retry_allocation; | |
2705 | } | |
2706 | ||
2707 | if (retry_sz) | |
2708 | pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \ | |
2709 | "succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), " | |
2710 | "total(%d kb)\n", ioc->name, ioc->hba_queue_depth, | |
2711 | ioc->chains_needed_per_io, ioc->request_sz, sz/1024); | |
2712 | ||
2713 | /* hi-priority queue */ | |
2714 | ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) * | |
2715 | ioc->request_sz); | |
2716 | ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) * | |
2717 | ioc->request_sz); | |
2718 | ||
2719 | /* internal queue */ | |
2720 | ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth * | |
2721 | ioc->request_sz); | |
2722 | ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth * | |
2723 | ioc->request_sz); | |
2724 | ||
2725 | dinitprintk(ioc, pr_info(MPT3SAS_FMT | |
2726 | "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n", | |
2727 | ioc->name, ioc->request, ioc->hba_queue_depth, ioc->request_sz, | |
2728 | (ioc->hba_queue_depth * ioc->request_sz)/1024)); | |
2729 | ||
2730 | dinitprintk(ioc, pr_info(MPT3SAS_FMT "request pool: dma(0x%llx)\n", | |
2731 | ioc->name, (unsigned long long) ioc->request_dma)); | |
2732 | total_sz += sz; | |
2733 | ||
2734 | sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker); | |
2735 | ioc->scsi_lookup_pages = get_order(sz); | |
2736 | ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages( | |
2737 | GFP_KERNEL, ioc->scsi_lookup_pages); | |
2738 | if (!ioc->scsi_lookup) { | |
2739 | pr_err(MPT3SAS_FMT "scsi_lookup: get_free_pages failed, sz(%d)\n", | |
2740 | ioc->name, (int)sz); | |
2741 | goto out; | |
2742 | } | |
2743 | ||
2744 | dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n", | |
2745 | ioc->name, ioc->request, ioc->scsiio_depth)); | |
2746 | ||
2747 | ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH); | |
2748 | sz = ioc->chain_depth * sizeof(struct chain_tracker); | |
2749 | ioc->chain_pages = get_order(sz); | |
2750 | ioc->chain_lookup = (struct chain_tracker *)__get_free_pages( | |
2751 | GFP_KERNEL, ioc->chain_pages); | |
2752 | if (!ioc->chain_lookup) { | |
2753 | pr_err(MPT3SAS_FMT "chain_lookup: __get_free_pages failed\n", | |
2754 | ioc->name); | |
2755 | goto out; | |
2756 | } | |
2757 | ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev, | |
2758 | ioc->request_sz, 16, 0); | |
2759 | if (!ioc->chain_dma_pool) { | |
2760 | pr_err(MPT3SAS_FMT "chain_dma_pool: pci_pool_create failed\n", | |
2761 | ioc->name); | |
2762 | goto out; | |
2763 | } | |
2764 | for (i = 0; i < ioc->chain_depth; i++) { | |
2765 | ioc->chain_lookup[i].chain_buffer = pci_pool_alloc( | |
2766 | ioc->chain_dma_pool , GFP_KERNEL, | |
2767 | &ioc->chain_lookup[i].chain_buffer_dma); | |
2768 | if (!ioc->chain_lookup[i].chain_buffer) { | |
2769 | ioc->chain_depth = i; | |
2770 | goto chain_done; | |
2771 | } | |
2772 | total_sz += ioc->request_sz; | |
2773 | } | |
2774 | chain_done: | |
2775 | dinitprintk(ioc, pr_info(MPT3SAS_FMT | |
2776 | "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n", | |
2777 | ioc->name, ioc->chain_depth, ioc->request_sz, | |
2778 | ((ioc->chain_depth * ioc->request_sz))/1024)); | |
2779 | ||
2780 | /* initialize hi-priority queue smid's */ | |
2781 | ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth, | |
2782 | sizeof(struct request_tracker), GFP_KERNEL); | |
2783 | if (!ioc->hpr_lookup) { | |
2784 | pr_err(MPT3SAS_FMT "hpr_lookup: kcalloc failed\n", | |
2785 | ioc->name); | |
2786 | goto out; | |
2787 | } | |
2788 | ioc->hi_priority_smid = ioc->scsiio_depth + 1; | |
2789 | dinitprintk(ioc, pr_info(MPT3SAS_FMT | |
2790 | "hi_priority(0x%p): depth(%d), start smid(%d)\n", | |
2791 | ioc->name, ioc->hi_priority, | |
2792 | ioc->hi_priority_depth, ioc->hi_priority_smid)); | |
2793 | ||
2794 | /* initialize internal queue smid's */ | |
2795 | ioc->internal_lookup = kcalloc(ioc->internal_depth, | |
2796 | sizeof(struct request_tracker), GFP_KERNEL); | |
2797 | if (!ioc->internal_lookup) { | |
2798 | pr_err(MPT3SAS_FMT "internal_lookup: kcalloc failed\n", | |
2799 | ioc->name); | |
2800 | goto out; | |
2801 | } | |
2802 | ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth; | |
2803 | dinitprintk(ioc, pr_info(MPT3SAS_FMT | |
2804 | "internal(0x%p): depth(%d), start smid(%d)\n", | |
2805 | ioc->name, ioc->internal, | |
2806 | ioc->internal_depth, ioc->internal_smid)); | |
2807 | ||
2808 | /* sense buffers, 4 byte align */ | |
2809 | sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE; | |
2810 | ioc->sense_dma_pool = pci_pool_create("sense pool", ioc->pdev, sz, 4, | |
2811 | 0); | |
2812 | if (!ioc->sense_dma_pool) { | |
2813 | pr_err(MPT3SAS_FMT "sense pool: pci_pool_create failed\n", | |
2814 | ioc->name); | |
2815 | goto out; | |
2816 | } | |
2817 | ioc->sense = pci_pool_alloc(ioc->sense_dma_pool , GFP_KERNEL, | |
2818 | &ioc->sense_dma); | |
2819 | if (!ioc->sense) { | |
2820 | pr_err(MPT3SAS_FMT "sense pool: pci_pool_alloc failed\n", | |
2821 | ioc->name); | |
2822 | goto out; | |
2823 | } | |
2824 | dinitprintk(ioc, pr_info(MPT3SAS_FMT | |
2825 | "sense pool(0x%p): depth(%d), element_size(%d), pool_size" | |
2826 | "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth, | |
2827 | SCSI_SENSE_BUFFERSIZE, sz/1024)); | |
2828 | dinitprintk(ioc, pr_info(MPT3SAS_FMT "sense_dma(0x%llx)\n", | |
2829 | ioc->name, (unsigned long long)ioc->sense_dma)); | |
2830 | total_sz += sz; | |
2831 | ||
2832 | /* reply pool, 4 byte align */ | |
2833 | sz = ioc->reply_free_queue_depth * ioc->reply_sz; | |
2834 | ioc->reply_dma_pool = pci_pool_create("reply pool", ioc->pdev, sz, 4, | |
2835 | 0); | |
2836 | if (!ioc->reply_dma_pool) { | |
2837 | pr_err(MPT3SAS_FMT "reply pool: pci_pool_create failed\n", | |
2838 | ioc->name); | |
2839 | goto out; | |
2840 | } | |
2841 | ioc->reply = pci_pool_alloc(ioc->reply_dma_pool , GFP_KERNEL, | |
2842 | &ioc->reply_dma); | |
2843 | if (!ioc->reply) { | |
2844 | pr_err(MPT3SAS_FMT "reply pool: pci_pool_alloc failed\n", | |
2845 | ioc->name); | |
2846 | goto out; | |
2847 | } | |
2848 | ioc->reply_dma_min_address = (u32)(ioc->reply_dma); | |
2849 | ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz; | |
2850 | dinitprintk(ioc, pr_info(MPT3SAS_FMT | |
2851 | "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n", | |
2852 | ioc->name, ioc->reply, | |
2853 | ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024)); | |
2854 | dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_dma(0x%llx)\n", | |
2855 | ioc->name, (unsigned long long)ioc->reply_dma)); | |
2856 | total_sz += sz; | |
2857 | ||
2858 | /* reply free queue, 16 byte align */ | |
2859 | sz = ioc->reply_free_queue_depth * 4; | |
2860 | ioc->reply_free_dma_pool = pci_pool_create("reply_free pool", | |
2861 | ioc->pdev, sz, 16, 0); | |
2862 | if (!ioc->reply_free_dma_pool) { | |
2863 | pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_create failed\n", | |
2864 | ioc->name); | |
2865 | goto out; | |
2866 | } | |
2867 | ioc->reply_free = pci_pool_alloc(ioc->reply_free_dma_pool , GFP_KERNEL, | |
2868 | &ioc->reply_free_dma); | |
2869 | if (!ioc->reply_free) { | |
2870 | pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_alloc failed\n", | |
2871 | ioc->name); | |
2872 | goto out; | |
2873 | } | |
2874 | memset(ioc->reply_free, 0, sz); | |
2875 | dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_free pool(0x%p): " \ | |
2876 | "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name, | |
2877 | ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024)); | |
2878 | dinitprintk(ioc, pr_info(MPT3SAS_FMT | |
2879 | "reply_free_dma (0x%llx)\n", | |
2880 | ioc->name, (unsigned long long)ioc->reply_free_dma)); | |
2881 | total_sz += sz; | |
2882 | ||
2883 | /* reply post queue, 16 byte align */ | |
2884 | reply_post_free_sz = ioc->reply_post_queue_depth * | |
2885 | sizeof(Mpi2DefaultReplyDescriptor_t); | |
2886 | if (_base_is_controller_msix_enabled(ioc)) | |
2887 | sz = reply_post_free_sz * ioc->reply_queue_count; | |
2888 | else | |
2889 | sz = reply_post_free_sz; | |
2890 | ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool", | |
2891 | ioc->pdev, sz, 16, 0); | |
2892 | if (!ioc->reply_post_free_dma_pool) { | |
2893 | pr_err(MPT3SAS_FMT | |
2894 | "reply_post_free pool: pci_pool_create failed\n", | |
2895 | ioc->name); | |
2896 | goto out; | |
2897 | } | |
2898 | ioc->reply_post_free = pci_pool_alloc(ioc->reply_post_free_dma_pool , | |
2899 | GFP_KERNEL, &ioc->reply_post_free_dma); | |
2900 | if (!ioc->reply_post_free) { | |
2901 | pr_err(MPT3SAS_FMT | |
2902 | "reply_post_free pool: pci_pool_alloc failed\n", | |
2903 | ioc->name); | |
2904 | goto out; | |
2905 | } | |
2906 | memset(ioc->reply_post_free, 0, sz); | |
2907 | dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply post free pool" \ | |
2908 | "(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n", | |
2909 | ioc->name, ioc->reply_post_free, ioc->reply_post_queue_depth, 8, | |
2910 | sz/1024)); | |
2911 | dinitprintk(ioc, pr_info(MPT3SAS_FMT | |
2912 | "reply_post_free_dma = (0x%llx)\n", | |
2913 | ioc->name, (unsigned long long) | |
2914 | ioc->reply_post_free_dma)); | |
2915 | total_sz += sz; | |
2916 | ||
2917 | ioc->config_page_sz = 512; | |
2918 | ioc->config_page = pci_alloc_consistent(ioc->pdev, | |
2919 | ioc->config_page_sz, &ioc->config_page_dma); | |
2920 | if (!ioc->config_page) { | |
2921 | pr_err(MPT3SAS_FMT | |
2922 | "config page: pci_pool_alloc failed\n", | |
2923 | ioc->name); | |
2924 | goto out; | |
2925 | } | |
2926 | dinitprintk(ioc, pr_info(MPT3SAS_FMT | |
2927 | "config page(0x%p): size(%d)\n", | |
2928 | ioc->name, ioc->config_page, ioc->config_page_sz)); | |
2929 | dinitprintk(ioc, pr_info(MPT3SAS_FMT "config_page_dma(0x%llx)\n", | |
2930 | ioc->name, (unsigned long long)ioc->config_page_dma)); | |
2931 | total_sz += ioc->config_page_sz; | |
2932 | ||
2933 | pr_info(MPT3SAS_FMT "Allocated physical memory: size(%d kB)\n", | |
2934 | ioc->name, total_sz/1024); | |
2935 | pr_info(MPT3SAS_FMT | |
2936 | "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n", | |
2937 | ioc->name, ioc->shost->can_queue, facts->RequestCredit); | |
2938 | pr_info(MPT3SAS_FMT "Scatter Gather Elements per IO(%d)\n", | |
2939 | ioc->name, ioc->shost->sg_tablesize); | |
2940 | return 0; | |
2941 | ||
2942 | out: | |
2943 | return -ENOMEM; | |
2944 | } | |
2945 | ||
2946 | /** | |
2947 | * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter. | |
2948 | * @ioc: Pointer to MPT_ADAPTER structure | |
2949 | * @cooked: Request raw or cooked IOC state | |
2950 | * | |
2951 | * Returns all IOC Doorbell register bits if cooked==0, else just the | |
2952 | * Doorbell bits in MPI_IOC_STATE_MASK. | |
2953 | */ | |
2954 | u32 | |
2955 | mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked) | |
2956 | { | |
2957 | u32 s, sc; | |
2958 | ||
2959 | s = readl(&ioc->chip->Doorbell); | |
2960 | sc = s & MPI2_IOC_STATE_MASK; | |
2961 | return cooked ? sc : s; | |
2962 | } | |
2963 | ||
2964 | /** | |
2965 | * _base_wait_on_iocstate - waiting on a particular ioc state | |
2966 | * @ioc_state: controller state { READY, OPERATIONAL, or RESET } | |
2967 | * @timeout: timeout in second | |
2968 | * @sleep_flag: CAN_SLEEP or NO_SLEEP | |
2969 | * | |
2970 | * Returns 0 for success, non-zero for failure. | |
2971 | */ | |
2972 | static int | |
2973 | _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout, | |
2974 | int sleep_flag) | |
2975 | { | |
2976 | u32 count, cntdn; | |
2977 | u32 current_state; | |
2978 | ||
2979 | count = 0; | |
2980 | cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout; | |
2981 | do { | |
2982 | current_state = mpt3sas_base_get_iocstate(ioc, 1); | |
2983 | if (current_state == ioc_state) | |
2984 | return 0; | |
2985 | if (count && current_state == MPI2_IOC_STATE_FAULT) | |
2986 | break; | |
2987 | if (sleep_flag == CAN_SLEEP) | |
2988 | usleep_range(1000, 1500); | |
2989 | else | |
2990 | udelay(500); | |
2991 | count++; | |
2992 | } while (--cntdn); | |
2993 | ||
2994 | return current_state; | |
2995 | } | |
2996 | ||
2997 | /** | |
2998 | * _base_wait_for_doorbell_int - waiting for controller interrupt(generated by | |
2999 | * a write to the doorbell) | |
3000 | * @ioc: per adapter object | |
3001 | * @timeout: timeout in second | |
3002 | * @sleep_flag: CAN_SLEEP or NO_SLEEP | |
3003 | * | |
3004 | * Returns 0 for success, non-zero for failure. | |
3005 | * | |
3006 | * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell. | |
3007 | */ | |
3008 | static int | |
3009 | _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout, | |
3010 | int sleep_flag) | |
3011 | { | |
3012 | u32 cntdn, count; | |
3013 | u32 int_status; | |
3014 | ||
3015 | count = 0; | |
3016 | cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout; | |
3017 | do { | |
3018 | int_status = readl(&ioc->chip->HostInterruptStatus); | |
3019 | if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) { | |
3020 | dhsprintk(ioc, pr_info(MPT3SAS_FMT | |
3021 | "%s: successful count(%d), timeout(%d)\n", | |
3022 | ioc->name, __func__, count, timeout)); | |
3023 | return 0; | |
3024 | } | |
3025 | if (sleep_flag == CAN_SLEEP) | |
3026 | usleep_range(1000, 1500); | |
3027 | else | |
3028 | udelay(500); | |
3029 | count++; | |
3030 | } while (--cntdn); | |
3031 | ||
3032 | pr_err(MPT3SAS_FMT | |
3033 | "%s: failed due to timeout count(%d), int_status(%x)!\n", | |
3034 | ioc->name, __func__, count, int_status); | |
3035 | return -EFAULT; | |
3036 | } | |
3037 | ||
/**
 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
 * @ioc: per adapter object
 * @timeout: timeout in second
 * @sleep_flag: CAN_SLEEP or NO_SLEEP
 *
 * Returns 0 for success, non-zero for failure.
 *
 * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
 * doorbell.
 */
static int
_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout,
	int sleep_flag)
{
	u32 cntdn, count;
	u32 int_status;
	u32 doorbell;

	count = 0;
	/* each iteration is ~1ms (CAN_SLEEP) or ~0.5ms (NO_SLEEP), so the
	 * iteration budget spans roughly @timeout seconds either way
	 */
	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
	do {
		int_status = readl(&ioc->chip->HostInterruptStatus);
		if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
			/* the IOC has consumed our doorbell write - done */
			dhsprintk(ioc, pr_info(MPT3SAS_FMT
				"%s: successful count(%d), timeout(%d)\n",
				ioc->name, __func__, count, timeout));
			return 0;
		} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
			/* IOC wrote to the doorbell instead; if it reports a
			 * FAULT state, dump the fault info and give up
			 */
			doorbell = readl(&ioc->chip->Doorbell);
			if ((doorbell & MPI2_IOC_STATE_MASK) ==
			    MPI2_IOC_STATE_FAULT) {
				mpt3sas_base_fault_info(ioc , doorbell);
				return -EFAULT;
			}
		} else if (int_status == 0xFFFFFFFF)
			/* all-ones read: presumably the adapter dropped off
			 * the bus (e.g. surprise removal) - stop polling
			 */
			goto out;

		if (sleep_flag == CAN_SLEEP)
			usleep_range(1000, 1500);
		else
			udelay(500);
		count++;
	} while (--cntdn);

 out:
	pr_err(MPT3SAS_FMT
		"%s: failed due to timeout count(%d), int_status(%x)!\n",
		ioc->name, __func__, count, int_status);
	return -EFAULT;
}
3089 | ||
3090 | /** | |
3091 | * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use | |
3092 | * @ioc: per adapter object | |
3093 | * @timeout: timeout in second | |
3094 | * @sleep_flag: CAN_SLEEP or NO_SLEEP | |
3095 | * | |
3096 | * Returns 0 for success, non-zero for failure. | |
3097 | * | |
3098 | */ | |
3099 | static int | |
3100 | _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout, | |
3101 | int sleep_flag) | |
3102 | { | |
3103 | u32 cntdn, count; | |
3104 | u32 doorbell_reg; | |
3105 | ||
3106 | count = 0; | |
3107 | cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout; | |
3108 | do { | |
3109 | doorbell_reg = readl(&ioc->chip->Doorbell); | |
3110 | if (!(doorbell_reg & MPI2_DOORBELL_USED)) { | |
3111 | dhsprintk(ioc, pr_info(MPT3SAS_FMT | |
3112 | "%s: successful count(%d), timeout(%d)\n", | |
3113 | ioc->name, __func__, count, timeout)); | |
3114 | return 0; | |
3115 | } | |
3116 | if (sleep_flag == CAN_SLEEP) | |
3117 | usleep_range(1000, 1500); | |
3118 | else | |
3119 | udelay(500); | |
3120 | count++; | |
3121 | } while (--cntdn); | |
3122 | ||
3123 | pr_err(MPT3SAS_FMT | |
3124 | "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n", | |
3125 | ioc->name, __func__, count, doorbell_reg); | |
3126 | return -EFAULT; | |
3127 | } | |
3128 | ||
/**
 * _base_send_ioc_reset - send doorbell reset
 * @ioc: per adapter object
 * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
 * @timeout: timeout in second
 * @sleep_flag: CAN_SLEEP or NO_SLEEP
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout,
	int sleep_flag)
{
	u32 ioc_state;
	int r = 0;

	if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
		pr_err(MPT3SAS_FMT "%s: unknown reset_type\n",
			ioc->name, __func__);
		return -EFAULT;
	}

	/* NOTE(review): the reset is refused unless the IOC advertises
	 * EVENT_REPLAY capability; presumably a message unit reset without
	 * event replay would lose events - confirm against the MPI2 spec
	 */
	if (!(ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
		return -EFAULT;

	pr_info(MPT3SAS_FMT "sending message unit reset !!\n", ioc->name);

	/* the reset request is a single doorbell write: the function code
	 * shifted into the doorbell's function field, no payload
	 */
	writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
	    &ioc->chip->Doorbell);
	if ((_base_wait_for_doorbell_ack(ioc, 15, sleep_flag))) {
		r = -EFAULT;
		goto out;
	}
	/* the reset has completed once the IOC is back in READY state */
	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
	    timeout, sleep_flag);
	if (ioc_state) {
		pr_err(MPT3SAS_FMT
			"%s: failed going to ready state (ioc_state=0x%x)\n",
			ioc->name, __func__, ioc_state);
		r = -EFAULT;
		goto out;
	}
 out:
	pr_info(MPT3SAS_FMT "message unit reset: %s\n",
		ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
	return r;
}
3177 | ||
/**
 * _base_handshake_req_reply_wait - send request thru doorbell interface
 * @ioc: per adapter object
 * @request_bytes: request length
 * @request: pointer having request payload
 * @reply_bytes: reply length
 * @reply: pointer to reply payload
 * @timeout: timeout in second
 * @sleep_flag: CAN_SLEEP or NO_SLEEP
 *
 * Pushes the request to the IOC one 32-bit dword at a time and reads the
 * reply back one 16-bit word at a time, with an interrupt/ack handshake
 * around every transfer.
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
	u32 *request, int reply_bytes, u16 *reply, int timeout, int sleep_flag)
{
	MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
	int i;
	u8 failed;
	u16 dummy;
	__le32 *mfp;

	/* make sure doorbell is not in use */
	if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
		pr_err(MPT3SAS_FMT
			"doorbell is in use (line=%d)\n",
			ioc->name, __LINE__);
		return -EFAULT;
	}

	/* clear pending doorbell interrupts from previous state changes */
	if (readl(&ioc->chip->HostInterruptStatus) &
	    MPI2_HIS_IOC2SYS_DB_STATUS)
		writel(0, &ioc->chip->HostInterruptStatus);

	/* send message to ioc: HANDSHAKE function code plus the request
	 * length in dwords, packed into a single doorbell write
	 */
	writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
	    ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
	    &ioc->chip->Doorbell);

	if ((_base_wait_for_doorbell_int(ioc, 5, NO_SLEEP))) {
		pr_err(MPT3SAS_FMT
			"doorbell handshake int failed (line=%d)\n",
			ioc->name, __LINE__);
		return -EFAULT;
	}
	/* acknowledge the IOC's doorbell interrupt */
	writel(0, &ioc->chip->HostInterruptStatus);

	if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag))) {
		pr_err(MPT3SAS_FMT
			"doorbell handshake ack failed (line=%d)\n",
			ioc->name, __LINE__);
		return -EFAULT;
	}

	/* send message 32-bits at a time, waiting for the IOC to consume
	 * each dword before writing the next.
	 * NOTE(review): cpu_to_le32() feeding writel() looks like a double
	 * conversion; since both are a byte-swap on BE it is numerically a
	 * single swap, which presumably assumes @request is already
	 * little-endian formatted - confirm with sparse/endianness review
	 */
	for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
		writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
		if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag)))
			failed = 1;
	}

	if (failed) {
		pr_err(MPT3SAS_FMT
			"doorbell handshake sending request failed (line=%d)\n",
			ioc->name, __LINE__);
		return -EFAULT;
	}

	/* now wait for the reply */
	if ((_base_wait_for_doorbell_int(ioc, timeout, sleep_flag))) {
		pr_err(MPT3SAS_FMT
			"doorbell handshake int failed (line=%d)\n",
			ioc->name, __LINE__);
		return -EFAULT;
	}

	/* read the first two 16-bits, it gives the total length of the reply */
	reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
	    & MPI2_DOORBELL_DATA_MASK);
	writel(0, &ioc->chip->HostInterruptStatus);
	if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
		pr_err(MPT3SAS_FMT
			"doorbell handshake int failed (line=%d)\n",
			ioc->name, __LINE__);
		return -EFAULT;
	}
	reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
	    & MPI2_DOORBELL_DATA_MASK);
	writel(0, &ioc->chip->HostInterruptStatus);

	/* MsgLength is presumably in dwords (the reply is read as 16-bit
	 * words, hence the * 2).  Words beyond the caller's @reply_bytes
	 * buffer are drained into @dummy so the handshake still completes.
	 */
	for (i = 2; i < default_reply->MsgLength * 2; i++) {
		if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
			pr_err(MPT3SAS_FMT
				"doorbell handshake int failed (line=%d)\n",
				ioc->name, __LINE__);
			return -EFAULT;
		}
		if (i >= reply_bytes/2) /* overflow case */
			dummy = readl(&ioc->chip->Doorbell);
		else
			reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
			    & MPI2_DOORBELL_DATA_MASK);
		writel(0, &ioc->chip->HostInterruptStatus);
	}

	/* final interrupt marks end-of-reply; then wait for the IOC to
	 * release the doorbell before handing it back to other users
	 */
	_base_wait_for_doorbell_int(ioc, 5, sleep_flag);
	if (_base_wait_for_doorbell_not_used(ioc, 5, sleep_flag) != 0) {
		dhsprintk(ioc, pr_info(MPT3SAS_FMT
			"doorbell is in use (line=%d)\n", ioc->name, __LINE__));
	}
	writel(0, &ioc->chip->HostInterruptStatus);

	/* optionally dump the raw reply frame for init debugging */
	if (ioc->logging_level & MPT_DEBUG_INIT) {
		mfp = (__le32 *)reply;
		pr_info("\toffset:data\n");
		for (i = 0; i < reply_bytes/4; i++)
			pr_info("\t[0x%02x]:%08x\n", i*4,
			    le32_to_cpu(mfp[i]));
	}
	return 0;
}
3300 | ||
3301 | /** | |
3302 | * mpt3sas_base_sas_iounit_control - send sas iounit control to FW | |
3303 | * @ioc: per adapter object | |
3304 | * @mpi_reply: the reply payload from FW | |
3305 | * @mpi_request: the request payload sent to FW | |
3306 | * | |
3307 | * The SAS IO Unit Control Request message allows the host to perform low-level | |
3308 | * operations, such as resets on the PHYs of the IO Unit, also allows the host | |
3309 | * to obtain the IOC assigned device handles for a device if it has other | |
3310 | * identifying information about the device, in addition allows the host to | |
3311 | * remove IOC resources associated with the device. | |
3312 | * | |
3313 | * Returns 0 for success, non-zero for failure. | |
3314 | */ | |
3315 | int | |
3316 | mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc, | |
3317 | Mpi2SasIoUnitControlReply_t *mpi_reply, | |
3318 | Mpi2SasIoUnitControlRequest_t *mpi_request) | |
3319 | { | |
3320 | u16 smid; | |
3321 | u32 ioc_state; | |
3322 | unsigned long timeleft; | |
3323 | u8 issue_reset; | |
3324 | int rc; | |
3325 | void *request; | |
3326 | u16 wait_state_count; | |
3327 | ||
3328 | dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, | |
3329 | __func__)); | |
3330 | ||
3331 | mutex_lock(&ioc->base_cmds.mutex); | |
3332 | ||
3333 | if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) { | |
3334 | pr_err(MPT3SAS_FMT "%s: base_cmd in use\n", | |
3335 | ioc->name, __func__); | |
3336 | rc = -EAGAIN; | |
3337 | goto out; | |
3338 | } | |
3339 | ||
3340 | wait_state_count = 0; | |
3341 | ioc_state = mpt3sas_base_get_iocstate(ioc, 1); | |
3342 | while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { | |
3343 | if (wait_state_count++ == 10) { | |
3344 | pr_err(MPT3SAS_FMT | |
3345 | "%s: failed due to ioc not operational\n", | |
3346 | ioc->name, __func__); | |
3347 | rc = -EFAULT; | |
3348 | goto out; | |
3349 | } | |
3350 | ssleep(1); | |
3351 | ioc_state = mpt3sas_base_get_iocstate(ioc, 1); | |
3352 | pr_info(MPT3SAS_FMT | |
3353 | "%s: waiting for operational state(count=%d)\n", | |
3354 | ioc->name, __func__, wait_state_count); | |
3355 | } | |
3356 | ||
3357 | smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); | |
3358 | if (!smid) { | |
3359 | pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", | |
3360 | ioc->name, __func__); | |
3361 | rc = -EAGAIN; | |
3362 | goto out; | |
3363 | } | |
3364 | ||
3365 | rc = 0; | |
3366 | ioc->base_cmds.status = MPT3_CMD_PENDING; | |
3367 | request = mpt3sas_base_get_msg_frame(ioc, smid); | |
3368 | ioc->base_cmds.smid = smid; | |
3369 | memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t)); | |
3370 | if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET || | |
3371 | mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) | |
3372 | ioc->ioc_link_reset_in_progress = 1; | |
3373 | init_completion(&ioc->base_cmds.done); | |
3374 | mpt3sas_base_put_smid_default(ioc, smid); | |
3375 | timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, | |
3376 | msecs_to_jiffies(10000)); | |
3377 | if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET || | |
3378 | mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) && | |
3379 | ioc->ioc_link_reset_in_progress) | |
3380 | ioc->ioc_link_reset_in_progress = 0; | |
3381 | if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { | |
3382 | pr_err(MPT3SAS_FMT "%s: timeout\n", | |
3383 | ioc->name, __func__); | |
3384 | _debug_dump_mf(mpi_request, | |
3385 | sizeof(Mpi2SasIoUnitControlRequest_t)/4); | |
3386 | if (!(ioc->base_cmds.status & MPT3_CMD_RESET)) | |
3387 | issue_reset = 1; | |
3388 | goto issue_host_reset; | |
3389 | } | |
3390 | if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) | |
3391 | memcpy(mpi_reply, ioc->base_cmds.reply, | |
3392 | sizeof(Mpi2SasIoUnitControlReply_t)); | |
3393 | else | |
3394 | memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t)); | |
3395 | ioc->base_cmds.status = MPT3_CMD_NOT_USED; | |
3396 | goto out; | |
3397 | ||
3398 | issue_host_reset: | |
3399 | if (issue_reset) | |
3400 | mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, | |
3401 | FORCE_BIG_HAMMER); | |
3402 | ioc->base_cmds.status = MPT3_CMD_NOT_USED; | |
3403 | rc = -EFAULT; | |
3404 | out: | |
3405 | mutex_unlock(&ioc->base_cmds.mutex); | |
3406 | return rc; | |
3407 | } | |
3408 | ||
3409 | /** | |
3410 | * mpt3sas_base_scsi_enclosure_processor - sending request to sep device | |
3411 | * @ioc: per adapter object | |
3412 | * @mpi_reply: the reply payload from FW | |
3413 | * @mpi_request: the request payload sent to FW | |
3414 | * | |
3415 | * The SCSI Enclosure Processor request message causes the IOC to | |
3416 | * communicate with SES devices to control LED status signals. | |
3417 | * | |
3418 | * Returns 0 for success, non-zero for failure. | |
3419 | */ | |
3420 | int | |
3421 | mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc, | |
3422 | Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request) | |
3423 | { | |
3424 | u16 smid; | |
3425 | u32 ioc_state; | |
3426 | unsigned long timeleft; | |
3427 | u8 issue_reset; | |
3428 | int rc; | |
3429 | void *request; | |
3430 | u16 wait_state_count; | |
3431 | ||
3432 | dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, | |
3433 | __func__)); | |
3434 | ||
3435 | mutex_lock(&ioc->base_cmds.mutex); | |
3436 | ||
3437 | if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) { | |
3438 | pr_err(MPT3SAS_FMT "%s: base_cmd in use\n", | |
3439 | ioc->name, __func__); | |
3440 | rc = -EAGAIN; | |
3441 | goto out; | |
3442 | } | |
3443 | ||
3444 | wait_state_count = 0; | |
3445 | ioc_state = mpt3sas_base_get_iocstate(ioc, 1); | |
3446 | while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { | |
3447 | if (wait_state_count++ == 10) { | |
3448 | pr_err(MPT3SAS_FMT | |
3449 | "%s: failed due to ioc not operational\n", | |
3450 | ioc->name, __func__); | |
3451 | rc = -EFAULT; | |
3452 | goto out; | |
3453 | } | |
3454 | ssleep(1); | |
3455 | ioc_state = mpt3sas_base_get_iocstate(ioc, 1); | |
3456 | pr_info(MPT3SAS_FMT | |
3457 | "%s: waiting for operational state(count=%d)\n", | |
3458 | ioc->name, | |
3459 | __func__, wait_state_count); | |
3460 | } | |
3461 | ||
3462 | smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); | |
3463 | if (!smid) { | |
3464 | pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", | |
3465 | ioc->name, __func__); | |
3466 | rc = -EAGAIN; | |
3467 | goto out; | |
3468 | } | |
3469 | ||
3470 | rc = 0; | |
3471 | ioc->base_cmds.status = MPT3_CMD_PENDING; | |
3472 | request = mpt3sas_base_get_msg_frame(ioc, smid); | |
3473 | ioc->base_cmds.smid = smid; | |
3474 | memcpy(request, mpi_request, sizeof(Mpi2SepReply_t)); | |
3475 | init_completion(&ioc->base_cmds.done); | |
3476 | mpt3sas_base_put_smid_default(ioc, smid); | |
3477 | timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, | |
3478 | msecs_to_jiffies(10000)); | |
3479 | if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { | |
3480 | pr_err(MPT3SAS_FMT "%s: timeout\n", | |
3481 | ioc->name, __func__); | |
3482 | _debug_dump_mf(mpi_request, | |
3483 | sizeof(Mpi2SepRequest_t)/4); | |
3484 | if (!(ioc->base_cmds.status & MPT3_CMD_RESET)) | |
3485 | issue_reset = 1; | |
3486 | goto issue_host_reset; | |
3487 | } | |
3488 | if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) | |
3489 | memcpy(mpi_reply, ioc->base_cmds.reply, | |
3490 | sizeof(Mpi2SepReply_t)); | |
3491 | else | |
3492 | memset(mpi_reply, 0, sizeof(Mpi2SepReply_t)); | |
3493 | ioc->base_cmds.status = MPT3_CMD_NOT_USED; | |
3494 | goto out; | |
3495 | ||
3496 | issue_host_reset: | |
3497 | if (issue_reset) | |
3498 | mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, | |
3499 | FORCE_BIG_HAMMER); | |
3500 | ioc->base_cmds.status = MPT3_CMD_NOT_USED; | |
3501 | rc = -EFAULT; | |
3502 | out: | |
3503 | mutex_unlock(&ioc->base_cmds.mutex); | |
3504 | return rc; | |
3505 | } | |
3506 | ||
3507 | /** | |
3508 | * _base_get_port_facts - obtain port facts reply and save in ioc | |
3509 | * @ioc: per adapter object | |
3510 | * @sleep_flag: CAN_SLEEP or NO_SLEEP | |
3511 | * | |
3512 | * Returns 0 for success, non-zero for failure. | |
3513 | */ | |
3514 | static int | |
3515 | _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port, int sleep_flag) | |
3516 | { | |
3517 | Mpi2PortFactsRequest_t mpi_request; | |
3518 | Mpi2PortFactsReply_t mpi_reply; | |
3519 | struct mpt3sas_port_facts *pfacts; | |
3520 | int mpi_reply_sz, mpi_request_sz, r; | |
3521 | ||
3522 | dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, | |
3523 | __func__)); | |
3524 | ||
3525 | mpi_reply_sz = sizeof(Mpi2PortFactsReply_t); | |
3526 | mpi_request_sz = sizeof(Mpi2PortFactsRequest_t); | |
3527 | memset(&mpi_request, 0, mpi_request_sz); | |
3528 | mpi_request.Function = MPI2_FUNCTION_PORT_FACTS; | |
3529 | mpi_request.PortNumber = port; | |
3530 | r = _base_handshake_req_reply_wait(ioc, mpi_request_sz, | |
3531 | (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP); | |
3532 | ||
3533 | if (r != 0) { | |
3534 | pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n", | |
3535 | ioc->name, __func__, r); | |
3536 | return r; | |
3537 | } | |
3538 | ||
3539 | pfacts = &ioc->pfacts[port]; | |
3540 | memset(pfacts, 0, sizeof(struct mpt3sas_port_facts)); | |
3541 | pfacts->PortNumber = mpi_reply.PortNumber; | |
3542 | pfacts->VP_ID = mpi_reply.VP_ID; | |
3543 | pfacts->VF_ID = mpi_reply.VF_ID; | |
3544 | pfacts->MaxPostedCmdBuffers = | |
3545 | le16_to_cpu(mpi_reply.MaxPostedCmdBuffers); | |
3546 | ||
3547 | return 0; | |
3548 | } | |
3549 | ||
3550 | /** | |
3551 | * _base_get_ioc_facts - obtain ioc facts reply and save in ioc | |
3552 | * @ioc: per adapter object | |
3553 | * @sleep_flag: CAN_SLEEP or NO_SLEEP | |
3554 | * | |
3555 | * Returns 0 for success, non-zero for failure. | |
3556 | */ | |
3557 | static int | |
3558 | _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) | |
3559 | { | |
3560 | Mpi2IOCFactsRequest_t mpi_request; | |
3561 | Mpi2IOCFactsReply_t mpi_reply; | |
3562 | struct mpt3sas_facts *facts; | |
3563 | int mpi_reply_sz, mpi_request_sz, r; | |
3564 | ||
3565 | dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, | |
3566 | __func__)); | |
3567 | ||
3568 | mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t); | |
3569 | mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t); | |
3570 | memset(&mpi_request, 0, mpi_request_sz); | |
3571 | mpi_request.Function = MPI2_FUNCTION_IOC_FACTS; | |
3572 | r = _base_handshake_req_reply_wait(ioc, mpi_request_sz, | |
3573 | (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP); | |
3574 | ||
3575 | if (r != 0) { | |
3576 | pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n", | |
3577 | ioc->name, __func__, r); | |
3578 | return r; | |
3579 | } | |
3580 | ||
3581 | facts = &ioc->facts; | |
3582 | memset(facts, 0, sizeof(struct mpt3sas_facts)); | |
3583 | facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion); | |
3584 | facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion); | |
3585 | facts->VP_ID = mpi_reply.VP_ID; | |
3586 | facts->VF_ID = mpi_reply.VF_ID; | |
3587 | facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions); | |
3588 | facts->MaxChainDepth = mpi_reply.MaxChainDepth; | |
3589 | facts->WhoInit = mpi_reply.WhoInit; | |
3590 | facts->NumberOfPorts = mpi_reply.NumberOfPorts; | |
3591 | facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors; | |
3592 | facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit); | |
3593 | facts->MaxReplyDescriptorPostQueueDepth = | |
3594 | le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth); | |
3595 | facts->ProductID = le16_to_cpu(mpi_reply.ProductID); | |
3596 | facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities); | |
3597 | if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID)) | |
3598 | ioc->ir_firmware = 1; | |
3599 | facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word); | |
3600 | facts->IOCRequestFrameSize = | |
3601 | le16_to_cpu(mpi_reply.IOCRequestFrameSize); | |
3602 | facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators); | |
3603 | facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets); | |
3604 | ioc->shost->max_id = -1; | |
3605 | facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders); | |
3606 | facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures); | |
3607 | facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags); | |
3608 | facts->HighPriorityCredit = | |
3609 | le16_to_cpu(mpi_reply.HighPriorityCredit); | |
3610 | facts->ReplyFrameSize = mpi_reply.ReplyFrameSize; | |
3611 | facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle); | |
3612 | ||
3613 | dinitprintk(ioc, pr_info(MPT3SAS_FMT | |
3614 | "hba queue depth(%d), max chains per io(%d)\n", | |
3615 | ioc->name, facts->RequestCredit, | |
3616 | facts->MaxChainDepth)); | |
3617 | dinitprintk(ioc, pr_info(MPT3SAS_FMT | |
3618 | "request frame size(%d), reply frame size(%d)\n", ioc->name, | |
3619 | facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4)); | |
3620 | return 0; | |
3621 | } | |
3622 | ||
3623 | /** | |
3624 | * _base_send_ioc_init - send ioc_init to firmware | |
3625 | * @ioc: per adapter object | |
3626 | * @sleep_flag: CAN_SLEEP or NO_SLEEP | |
3627 | * | |
3628 | * Returns 0 for success, non-zero for failure. | |
3629 | */ | |
3630 | static int | |
3631 | _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) | |
3632 | { | |
3633 | Mpi2IOCInitRequest_t mpi_request; | |
3634 | Mpi2IOCInitReply_t mpi_reply; | |
3635 | int r; | |
3636 | struct timeval current_time; | |
3637 | u16 ioc_status; | |
3638 | ||
3639 | dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, | |
3640 | __func__)); | |
3641 | ||
3642 | memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t)); | |
3643 | mpi_request.Function = MPI2_FUNCTION_IOC_INIT; | |
3644 | mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER; | |
3645 | mpi_request.VF_ID = 0; /* TODO */ | |
3646 | mpi_request.VP_ID = 0; | |
3647 | mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION); | |
3648 | mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION); | |
3649 | ||
3650 | if (_base_is_controller_msix_enabled(ioc)) | |
3651 | mpi_request.HostMSIxVectors = ioc->reply_queue_count; | |
3652 | mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4); | |
3653 | mpi_request.ReplyDescriptorPostQueueDepth = | |
3654 | cpu_to_le16(ioc->reply_post_queue_depth); | |
3655 | mpi_request.ReplyFreeQueueDepth = | |
3656 | cpu_to_le16(ioc->reply_free_queue_depth); | |
3657 | ||
3658 | mpi_request.SenseBufferAddressHigh = | |
3659 | cpu_to_le32((u64)ioc->sense_dma >> 32); | |
3660 | mpi_request.SystemReplyAddressHigh = | |
3661 | cpu_to_le32((u64)ioc->reply_dma >> 32); | |
3662 | mpi_request.SystemRequestFrameBaseAddress = | |
3663 | cpu_to_le64((u64)ioc->request_dma); | |
3664 | mpi_request.ReplyFreeQueueAddress = | |
3665 | cpu_to_le64((u64)ioc->reply_free_dma); | |
3666 | mpi_request.ReplyDescriptorPostQueueAddress = | |
3667 | cpu_to_le64((u64)ioc->reply_post_free_dma); | |
3668 | ||
3669 | ||
3670 | /* This time stamp specifies number of milliseconds | |
3671 | * since epoch ~ midnight January 1, 1970. | |
3672 | */ | |
3673 | do_gettimeofday(¤t_time); | |
3674 | mpi_request.TimeStamp = cpu_to_le64((u64)current_time.tv_sec * 1000 + | |
3675 | (current_time.tv_usec / 1000)); | |
3676 | ||
3677 | if (ioc->logging_level & MPT_DEBUG_INIT) { | |
3678 | __le32 *mfp; | |
3679 | int i; | |
3680 | ||
3681 | mfp = (__le32 *)&mpi_request; | |
3682 | pr_info("\toffset:data\n"); | |
3683 | for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++) | |
3684 | pr_info("\t[0x%02x]:%08x\n", i*4, | |
3685 | le32_to_cpu(mfp[i])); | |
3686 | } | |
3687 | ||
3688 | r = _base_handshake_req_reply_wait(ioc, | |
3689 | sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request, | |
3690 | sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10, | |
3691 | sleep_flag); | |
3692 | ||
3693 | if (r != 0) { | |
3694 | pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n", | |
3695 | ioc->name, __func__, r); | |
3696 | return r; | |
3697 | } | |
3698 | ||
3699 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; | |
3700 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS || | |
3701 | mpi_reply.IOCLogInfo) { | |
3702 | pr_err(MPT3SAS_FMT "%s: failed\n", ioc->name, __func__); | |
3703 | r = -EIO; | |
3704 | } | |
3705 | ||
3706 | return 0; | |
3707 | } | |
3708 | ||
3709 | /** | |
3710 | * mpt3sas_port_enable_done - command completion routine for port enable | |
3711 | * @ioc: per adapter object | |
3712 | * @smid: system request message index | |
3713 | * @msix_index: MSIX table index supplied by the OS | |
3714 | * @reply: reply message frame(lower 32bit addr) | |
3715 | * | |
3716 | * Return 1 meaning mf should be freed from _base_interrupt | |
3717 | * 0 means the mf is freed from this function. | |
3718 | */ | |
3719 | u8 | |
3720 | mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, | |
3721 | u32 reply) | |
3722 | { | |
3723 | MPI2DefaultReply_t *mpi_reply; | |
3724 | u16 ioc_status; | |
3725 | ||
3726 | if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED) | |
3727 | return 1; | |
3728 | ||
3729 | mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); | |
3730 | if (!mpi_reply) | |
3731 | return 1; | |
3732 | ||
3733 | if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE) | |
3734 | return 1; | |
3735 | ||
3736 | ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING; | |
3737 | ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE; | |
3738 | ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID; | |
3739 | memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4); | |
3740 | ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; | |
3741 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) | |
3742 | ioc->port_enable_failed = 1; | |
3743 | ||
3744 | if (ioc->is_driver_loading) { | |
3745 | if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { | |
3746 | mpt3sas_port_enable_complete(ioc); | |
3747 | return 1; | |
3748 | } else { | |
3749 | ioc->start_scan_failed = ioc_status; | |
3750 | ioc->start_scan = 0; | |
3751 | return 1; | |
3752 | } | |
3753 | } | |
3754 | complete(&ioc->port_enable_cmds.done); | |
3755 | return 1; | |
3756 | } | |
3757 | ||
3758 | /** | |
3759 | * _base_send_port_enable - send port_enable(discovery stuff) to firmware | |
3760 | * @ioc: per adapter object | |
3761 | * @sleep_flag: CAN_SLEEP or NO_SLEEP | |
3762 | * | |
3763 | * Returns 0 for success, non-zero for failure. | |
3764 | */ | |
3765 | static int | |
3766 | _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) | |
3767 | { | |
3768 | Mpi2PortEnableRequest_t *mpi_request; | |
3769 | Mpi2PortEnableReply_t *mpi_reply; | |
3770 | unsigned long timeleft; | |
3771 | int r = 0; | |
3772 | u16 smid; | |
3773 | u16 ioc_status; | |
3774 | ||
3775 | pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name); | |
3776 | ||
3777 | if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) { | |
3778 | pr_err(MPT3SAS_FMT "%s: internal command already in use\n", | |
3779 | ioc->name, __func__); | |
3780 | return -EAGAIN; | |
3781 | } | |
3782 | ||
3783 | smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx); | |
3784 | if (!smid) { | |
3785 | pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", | |
3786 | ioc->name, __func__); | |
3787 | return -EAGAIN; | |
3788 | } | |
3789 | ||
3790 | ioc->port_enable_cmds.status = MPT3_CMD_PENDING; | |
3791 | mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); | |
3792 | ioc->port_enable_cmds.smid = smid; | |
3793 | memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t)); | |
3794 | mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE; | |
3795 | ||
3796 | init_completion(&ioc->port_enable_cmds.done); | |
3797 | mpt3sas_base_put_smid_default(ioc, smid); | |
3798 | timeleft = wait_for_completion_timeout(&ioc->port_enable_cmds.done, | |
3799 | 300*HZ); | |
3800 | if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) { | |
3801 | pr_err(MPT3SAS_FMT "%s: timeout\n", | |
3802 | ioc->name, __func__); | |
3803 | _debug_dump_mf(mpi_request, | |
3804 | sizeof(Mpi2PortEnableRequest_t)/4); | |
3805 | if (ioc->port_enable_cmds.status & MPT3_CMD_RESET) | |
3806 | r = -EFAULT; | |
3807 | else | |
3808 | r = -ETIME; | |
3809 | goto out; | |
3810 | } | |
3811 | ||
3812 | mpi_reply = ioc->port_enable_cmds.reply; | |
3813 | ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; | |
3814 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { | |
3815 | pr_err(MPT3SAS_FMT "%s: failed with (ioc_status=0x%08x)\n", | |
3816 | ioc->name, __func__, ioc_status); | |
3817 | r = -EFAULT; | |
3818 | goto out; | |
3819 | } | |
3820 | ||
3821 | out: | |
3822 | ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; | |
3823 | pr_info(MPT3SAS_FMT "port enable: %s\n", ioc->name, ((r == 0) ? | |
3824 | "SUCCESS" : "FAILED")); | |
3825 | return r; | |
3826 | } | |
3827 | ||
3828 | /** | |
3829 | * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply) | |
3830 | * @ioc: per adapter object | |
3831 | * | |
3832 | * Returns 0 for success, non-zero for failure. | |
3833 | */ | |
3834 | int | |
3835 | mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc) | |
3836 | { | |
3837 | Mpi2PortEnableRequest_t *mpi_request; | |
3838 | u16 smid; | |
3839 | ||
3840 | pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name); | |
3841 | ||
3842 | if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) { | |
3843 | pr_err(MPT3SAS_FMT "%s: internal command already in use\n", | |
3844 | ioc->name, __func__); | |
3845 | return -EAGAIN; | |
3846 | } | |
3847 | ||
3848 | smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx); | |
3849 | if (!smid) { | |
3850 | pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", | |
3851 | ioc->name, __func__); | |
3852 | return -EAGAIN; | |
3853 | } | |
3854 | ||
3855 | ioc->port_enable_cmds.status = MPT3_CMD_PENDING; | |
3856 | mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); | |
3857 | ioc->port_enable_cmds.smid = smid; | |
3858 | memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t)); | |
3859 | mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE; | |
3860 | ||
3861 | mpt3sas_base_put_smid_default(ioc, smid); | |
3862 | return 0; | |
3863 | } | |
3864 | ||
3865 | /** | |
3866 | * _base_determine_wait_on_discovery - desposition | |
3867 | * @ioc: per adapter object | |
3868 | * | |
3869 | * Decide whether to wait on discovery to complete. Used to either | |
3870 | * locate boot device, or report volumes ahead of physical devices. | |
3871 | * | |
3872 | * Returns 1 for wait, 0 for don't wait | |
3873 | */ | |
3874 | static int | |
3875 | _base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc) | |
3876 | { | |
3877 | /* We wait for discovery to complete if IR firmware is loaded. | |
3878 | * The sas topology events arrive before PD events, so we need time to | |
3879 | * turn on the bit in ioc->pd_handles to indicate PD | |
3880 | * Also, it maybe required to report Volumes ahead of physical | |
3881 | * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set. | |
3882 | */ | |
3883 | if (ioc->ir_firmware) | |
3884 | return 1; | |
3885 | ||
3886 | /* if no Bios, then we don't need to wait */ | |
3887 | if (!ioc->bios_pg3.BiosVersion) | |
3888 | return 0; | |
3889 | ||
3890 | /* Bios is present, then we drop down here. | |
3891 | * | |
3892 | * If there any entries in the Bios Page 2, then we wait | |
3893 | * for discovery to complete. | |
3894 | */ | |
3895 | ||
3896 | /* Current Boot Device */ | |
3897 | if ((ioc->bios_pg2.CurrentBootDeviceForm & | |
3898 | MPI2_BIOSPAGE2_FORM_MASK) == | |
3899 | MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED && | |
3900 | /* Request Boot Device */ | |
3901 | (ioc->bios_pg2.ReqBootDeviceForm & | |
3902 | MPI2_BIOSPAGE2_FORM_MASK) == | |
3903 | MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED && | |
3904 | /* Alternate Request Boot Device */ | |
3905 | (ioc->bios_pg2.ReqAltBootDeviceForm & | |
3906 | MPI2_BIOSPAGE2_FORM_MASK) == | |
3907 | MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED) | |
3908 | return 0; | |
3909 | ||
3910 | return 1; | |
3911 | } | |
3912 | ||
3913 | /** | |
3914 | * _base_unmask_events - turn on notification for this event | |
3915 | * @ioc: per adapter object | |
3916 | * @event: firmware event | |
3917 | * | |
3918 | * The mask is stored in ioc->event_masks. | |
3919 | */ | |
3920 | static void | |
3921 | _base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event) | |
3922 | { | |
3923 | u32 desired_event; | |
3924 | ||
3925 | if (event >= 128) | |
3926 | return; | |
3927 | ||
3928 | desired_event = (1 << (event % 32)); | |
3929 | ||
3930 | if (event < 32) | |
3931 | ioc->event_masks[0] &= ~desired_event; | |
3932 | else if (event < 64) | |
3933 | ioc->event_masks[1] &= ~desired_event; | |
3934 | else if (event < 96) | |
3935 | ioc->event_masks[2] &= ~desired_event; | |
3936 | else if (event < 128) | |
3937 | ioc->event_masks[3] &= ~desired_event; | |
3938 | } | |
3939 | ||
/**
 * _base_event_notification - send event notification
 * @ioc: per adapter object
 * @sleep_flag: CAN_SLEEP or NO_SLEEP (NOTE(review): currently unused here;
 *	the wait below always sleeps -- confirm callers never pass NO_SLEEP)
 *
 * Sends MPI2_FUNCTION_EVENT_NOTIFICATION carrying ioc->event_masks to the
 * firmware and waits up to 30 seconds for the completion routine to
 * signal base_cmds.done.
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_event_notification(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
{
	Mpi2EventNotificationRequest_t *mpi_request;
	unsigned long timeleft;
	u16 smid;
	int r = 0;
	int i;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	/* base_cmds is a single-slot internal command channel */
	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
		pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
		    ioc->name, __func__);
		return -EAGAIN;
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
	if (!smid) {
		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
		    ioc->name, __func__);
		return -EAGAIN;
	}
	ioc->base_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->base_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
	mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
	mpi_request->VF_ID = 0; /* TODO */
	mpi_request->VP_ID = 0;
	/* copy the current event masks into the request (wire order) */
	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		mpi_request->EventMasks[i] =
		    cpu_to_le32(ioc->event_masks[i]);
	init_completion(&ioc->base_cmds.done);
	mpt3sas_base_put_smid_default(ioc, smid);
	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
		pr_err(MPT3SAS_FMT "%s: timeout\n",
		    ioc->name, __func__);
		_debug_dump_mf(mpi_request,
		    sizeof(Mpi2EventNotificationRequest_t)/4);
		/* -EFAULT when a host reset aborted us, -ETIME otherwise */
		if (ioc->base_cmds.status & MPT3_CMD_RESET)
			r = -EFAULT;
		else
			r = -ETIME;
	} else
		dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s: complete\n",
		    ioc->name, __func__));
	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
	return r;
}
3999 | ||
4000 | /** | |
4001 | * mpt3sas_base_validate_event_type - validating event types | |
4002 | * @ioc: per adapter object | |
4003 | * @event: firmware event | |
4004 | * | |
4005 | * This will turn on firmware event notification when application | |
4006 | * ask for that event. We don't mask events that are already enabled. | |
4007 | */ | |
4008 | void | |
4009 | mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type) | |
4010 | { | |
4011 | int i, j; | |
4012 | u32 event_mask, desired_event; | |
4013 | u8 send_update_to_fw; | |
4014 | ||
4015 | for (i = 0, send_update_to_fw = 0; i < | |
4016 | MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) { | |
4017 | event_mask = ~event_type[i]; | |
4018 | desired_event = 1; | |
4019 | for (j = 0; j < 32; j++) { | |
4020 | if (!(event_mask & desired_event) && | |
4021 | (ioc->event_masks[i] & desired_event)) { | |
4022 | ioc->event_masks[i] &= ~desired_event; | |
4023 | send_update_to_fw = 1; | |
4024 | } | |
4025 | desired_event = (desired_event << 1); | |
4026 | } | |
4027 | } | |
4028 | ||
4029 | if (!send_update_to_fw) | |
4030 | return; | |
4031 | ||
4032 | mutex_lock(&ioc->base_cmds.mutex); | |
4033 | _base_event_notification(ioc, CAN_SLEEP); | |
4034 | mutex_unlock(&ioc->base_cmds.mutex); | |
4035 | } | |
4036 | ||
/**
 * _base_diag_reset - the "big hammer" start of day reset
 * @ioc: per adapter object
 * @sleep_flag: CAN_SLEEP or NO_SLEEP
 *
 * Performs a full diagnostic reset of the controller: unlock the
 * diagnostic register with the magic write sequence, assert
 * RESET_ADAPTER, poll for completion, optionally restart from the HCB
 * image, then wait for firmware to reach the READY state.  The exact
 * register write order below is mandated by the hardware -- do not
 * reorder.
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
{
	u32 host_diagnostic;
	u32 ioc_state;
	u32 count;
	u32 hcb_size;

	pr_info(MPT3SAS_FMT "sending diag reset !!\n", ioc->name);

	drsprintk(ioc, pr_info(MPT3SAS_FMT "clear interrupts\n",
	    ioc->name));

	count = 0;
	do {
		/* Write magic sequence to WriteSequence register
		 * Loop until in diagnostic mode
		 */
		drsprintk(ioc, pr_info(MPT3SAS_FMT
		    "write magic sequence\n", ioc->name));
		writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);

		/* wait 100 msec */
		if (sleep_flag == CAN_SLEEP)
			msleep(100);
		else
			mdelay(100);

		/* give up after ~2 seconds of retrying the sequence */
		if (count++ > 20)
			goto out;

		host_diagnostic = readl(&ioc->chip->HostDiagnostic);
		drsprintk(ioc, pr_info(MPT3SAS_FMT
		    "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
		    ioc->name, count, host_diagnostic));

	} while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);

	/* snapshot HCBSize before resetting; may be re-enabled below */
	hcb_size = readl(&ioc->chip->HCBSize);

	drsprintk(ioc, pr_info(MPT3SAS_FMT "diag reset: issued\n",
	    ioc->name));
	writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
	     &ioc->chip->HostDiagnostic);

	/* don't access any registers for 50 milliseconds */
	msleep(50);

	/* 300 second max wait */
	for (count = 0; count < 3000000 ; count++) {

		host_diagnostic = readl(&ioc->chip->HostDiagnostic);

		/* all-ones read means the device fell off the bus */
		if (host_diagnostic == 0xFFFFFFFF)
			goto out;
		if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
			break;

		/* wait 1 msec */
		if (sleep_flag == CAN_SLEEP)
			usleep_range(1000, 1500);
		else
			mdelay(1);
	}

	if (host_diagnostic & MPI2_DIAG_HCB_MODE) {

		drsprintk(ioc, pr_info(MPT3SAS_FMT
		"restart the adapter assuming the HCB Address points to good F/W\n",
		    ioc->name));
		host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
		host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
		writel(host_diagnostic, &ioc->chip->HostDiagnostic);

		drsprintk(ioc, pr_info(MPT3SAS_FMT
		    "re-enable the HCDW\n", ioc->name));
		writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
		    &ioc->chip->HCBSize);
	}

	drsprintk(ioc, pr_info(MPT3SAS_FMT "restart the adapter\n",
	    ioc->name));
	writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
	    &ioc->chip->HostDiagnostic);

	/* re-lock the diagnostic register */
	drsprintk(ioc, pr_info(MPT3SAS_FMT
		"disable writes to the diagnostic register\n", ioc->name));
	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);

	drsprintk(ioc, pr_info(MPT3SAS_FMT
		"Wait for FW to go to the READY state\n", ioc->name));
	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20,
	    sleep_flag);
	if (ioc_state) {
		pr_err(MPT3SAS_FMT
			"%s: failed going to ready state (ioc_state=0x%x)\n",
			ioc->name, __func__, ioc_state);
		goto out;
	}

	pr_info(MPT3SAS_FMT "diag reset: SUCCESS\n", ioc->name);
	return 0;

 out:
	pr_err(MPT3SAS_FMT "diag reset: FAILED\n", ioc->name);
	return -EFAULT;
}
4157 | ||
4158 | /** | |
4159 | * _base_make_ioc_ready - put controller in READY state | |
4160 | * @ioc: per adapter object | |
4161 | * @sleep_flag: CAN_SLEEP or NO_SLEEP | |
4162 | * @type: FORCE_BIG_HAMMER or SOFT_RESET | |
4163 | * | |
4164 | * Returns 0 for success, non-zero for failure. | |
4165 | */ | |
4166 | static int | |
4167 | _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag, | |
4168 | enum reset_type type) | |
4169 | { | |
4170 | u32 ioc_state; | |
4171 | int rc; | |
4172 | int count; | |
4173 | ||
4174 | dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, | |
4175 | __func__)); | |
4176 | ||
4177 | if (ioc->pci_error_recovery) | |
4178 | return 0; | |
4179 | ||
4180 | ioc_state = mpt3sas_base_get_iocstate(ioc, 0); | |
4181 | dhsprintk(ioc, pr_info(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n", | |
4182 | ioc->name, __func__, ioc_state)); | |
4183 | ||
4184 | /* if in RESET state, it should move to READY state shortly */ | |
4185 | count = 0; | |
4186 | if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) { | |
4187 | while ((ioc_state & MPI2_IOC_STATE_MASK) != | |
4188 | MPI2_IOC_STATE_READY) { | |
4189 | if (count++ == 10) { | |
4190 | pr_err(MPT3SAS_FMT | |
4191 | "%s: failed going to ready state (ioc_state=0x%x)\n", | |
4192 | ioc->name, __func__, ioc_state); | |
4193 | return -EFAULT; | |
4194 | } | |
4195 | if (sleep_flag == CAN_SLEEP) | |
4196 | ssleep(1); | |
4197 | else | |
4198 | mdelay(1000); | |
4199 | ioc_state = mpt3sas_base_get_iocstate(ioc, 0); | |
4200 | } | |
4201 | } | |
4202 | ||
4203 | if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) | |
4204 | return 0; | |
4205 | ||
4206 | if (ioc_state & MPI2_DOORBELL_USED) { | |
4207 | dhsprintk(ioc, pr_info(MPT3SAS_FMT | |
4208 | "unexpected doorbell active!\n", | |
4209 | ioc->name)); | |
4210 | goto issue_diag_reset; | |
4211 | } | |
4212 | ||
4213 | if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { | |
4214 | mpt3sas_base_fault_info(ioc, ioc_state & | |
4215 | MPI2_DOORBELL_DATA_MASK); | |
4216 | goto issue_diag_reset; | |
4217 | } | |
4218 | ||
4219 | if (type == FORCE_BIG_HAMMER) | |
4220 | goto issue_diag_reset; | |
4221 | ||
4222 | if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL) | |
4223 | if (!(_base_send_ioc_reset(ioc, | |
4224 | MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15, CAN_SLEEP))) { | |
4225 | return 0; | |
4226 | } | |
4227 | ||
4228 | issue_diag_reset: | |
4229 | rc = _base_diag_reset(ioc, CAN_SLEEP); | |
4230 | return rc; | |
4231 | } | |
4232 | ||
4233 | /** | |
4234 | * _base_make_ioc_operational - put controller in OPERATIONAL state | |
4235 | * @ioc: per adapter object | |
4236 | * @sleep_flag: CAN_SLEEP or NO_SLEEP | |
4237 | * | |
4238 | * Returns 0 for success, non-zero for failure. | |
4239 | */ | |
4240 | static int | |
4241 | _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) | |
4242 | { | |
4243 | int r, i; | |
4244 | unsigned long flags; | |
4245 | u32 reply_address; | |
4246 | u16 smid; | |
4247 | struct _tr_list *delayed_tr, *delayed_tr_next; | |
4248 | struct adapter_reply_queue *reply_q; | |
4249 | long reply_post_free; | |
4250 | u32 reply_post_free_sz; | |
4251 | ||
4252 | dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, | |
4253 | __func__)); | |
4254 | ||
4255 | /* clean the delayed target reset list */ | |
4256 | list_for_each_entry_safe(delayed_tr, delayed_tr_next, | |
4257 | &ioc->delayed_tr_list, list) { | |
4258 | list_del(&delayed_tr->list); | |
4259 | kfree(delayed_tr); | |
4260 | } | |
4261 | ||
4262 | ||
4263 | list_for_each_entry_safe(delayed_tr, delayed_tr_next, | |
4264 | &ioc->delayed_tr_volume_list, list) { | |
4265 | list_del(&delayed_tr->list); | |
4266 | kfree(delayed_tr); | |
4267 | } | |
4268 | ||
4269 | /* initialize the scsi lookup free list */ | |
4270 | spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); | |
4271 | INIT_LIST_HEAD(&ioc->free_list); | |
4272 | smid = 1; | |
4273 | for (i = 0; i < ioc->scsiio_depth; i++, smid++) { | |
4274 | INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list); | |
4275 | ioc->scsi_lookup[i].cb_idx = 0xFF; | |
4276 | ioc->scsi_lookup[i].smid = smid; | |
4277 | ioc->scsi_lookup[i].scmd = NULL; | |
4278 | list_add_tail(&ioc->scsi_lookup[i].tracker_list, | |
4279 | &ioc->free_list); | |
4280 | } | |
4281 | ||
4282 | /* hi-priority queue */ | |
4283 | INIT_LIST_HEAD(&ioc->hpr_free_list); | |
4284 | smid = ioc->hi_priority_smid; | |
4285 | for (i = 0; i < ioc->hi_priority_depth; i++, smid++) { | |
4286 | ioc->hpr_lookup[i].cb_idx = 0xFF; | |
4287 | ioc->hpr_lookup[i].smid = smid; | |
4288 | list_add_tail(&ioc->hpr_lookup[i].tracker_list, | |
4289 | &ioc->hpr_free_list); | |
4290 | } | |
4291 | ||
4292 | /* internal queue */ | |
4293 | INIT_LIST_HEAD(&ioc->internal_free_list); | |
4294 | smid = ioc->internal_smid; | |
4295 | for (i = 0; i < ioc->internal_depth; i++, smid++) { | |
4296 | ioc->internal_lookup[i].cb_idx = 0xFF; | |
4297 | ioc->internal_lookup[i].smid = smid; | |
4298 | list_add_tail(&ioc->internal_lookup[i].tracker_list, | |
4299 | &ioc->internal_free_list); | |
4300 | } | |
4301 | ||
4302 | /* chain pool */ | |
4303 | INIT_LIST_HEAD(&ioc->free_chain_list); | |
4304 | for (i = 0; i < ioc->chain_depth; i++) | |
4305 | list_add_tail(&ioc->chain_lookup[i].tracker_list, | |
4306 | &ioc->free_chain_list); | |
4307 | ||
4308 | spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); | |
4309 | ||
4310 | /* initialize Reply Free Queue */ | |
4311 | for (i = 0, reply_address = (u32)ioc->reply_dma ; | |
4312 | i < ioc->reply_free_queue_depth ; i++, reply_address += | |
4313 | ioc->reply_sz) | |
4314 | ioc->reply_free[i] = cpu_to_le32(reply_address); | |
4315 | ||
4316 | /* initialize reply queues */ | |
4317 | if (ioc->is_driver_loading) | |
4318 | _base_assign_reply_queues(ioc); | |
4319 | ||
4320 | /* initialize Reply Post Free Queue */ | |
4321 | reply_post_free = (long)ioc->reply_post_free; | |
4322 | reply_post_free_sz = ioc->reply_post_queue_depth * | |
4323 | sizeof(Mpi2DefaultReplyDescriptor_t); | |
4324 | list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { | |
4325 | reply_q->reply_post_host_index = 0; | |
4326 | reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *) | |
4327 | reply_post_free; | |
4328 | for (i = 0; i < ioc->reply_post_queue_depth; i++) | |
4329 | reply_q->reply_post_free[i].Words = | |
4330 | cpu_to_le64(ULLONG_MAX); | |
4331 | if (!_base_is_controller_msix_enabled(ioc)) | |
4332 | goto skip_init_reply_post_free_queue; | |
4333 | reply_post_free += reply_post_free_sz; | |
4334 | } | |
4335 | skip_init_reply_post_free_queue: | |
4336 | ||
4337 | r = _base_send_ioc_init(ioc, sleep_flag); | |
4338 | if (r) | |
4339 | return r; | |
4340 | ||
4341 | /* initialize reply free host index */ | |
4342 | ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1; | |
4343 | writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex); | |
4344 | ||
4345 | /* initialize reply post host index */ | |
4346 | list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { | |
4347 | writel(reply_q->msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT, | |
4348 | &ioc->chip->ReplyPostHostIndex); | |
4349 | if (!_base_is_controller_msix_enabled(ioc)) | |
4350 | goto skip_init_reply_post_host_index; | |
4351 | } | |
4352 | ||
4353 | skip_init_reply_post_host_index: | |
4354 | ||
4355 | _base_unmask_interrupts(ioc); | |
4356 | r = _base_event_notification(ioc, sleep_flag); | |
4357 | if (r) | |
4358 | return r; | |
4359 | ||
4360 | if (sleep_flag == CAN_SLEEP) | |
4361 | _base_static_config_pages(ioc); | |
4362 | ||
4363 | ||
4364 | if (ioc->is_driver_loading) { | |
4365 | ioc->wait_for_discovery_to_complete = | |
4366 | _base_determine_wait_on_discovery(ioc); | |
4367 | ||
4368 | return r; /* scan_start and scan_finished support */ | |
4369 | } | |
4370 | ||
4371 | r = _base_send_port_enable(ioc, sleep_flag); | |
4372 | if (r) | |
4373 | return r; | |
4374 | ||
4375 | return r; | |
4376 | } | |
4377 | ||
4378 | /** | |
4379 | * mpt3sas_base_free_resources - free resources controller resources | |
4380 | * @ioc: per adapter object | |
4381 | * | |
4382 | * Return nothing. | |
4383 | */ | |
4384 | void | |
4385 | mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc) | |
4386 | { | |
4387 | struct pci_dev *pdev = ioc->pdev; | |
4388 | ||
4389 | dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, | |
4390 | __func__)); | |
4391 | ||
4392 | _base_mask_interrupts(ioc); | |
4393 | ioc->shost_recovery = 1; | |
4394 | _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET); | |
4395 | ioc->shost_recovery = 0; | |
4396 | _base_free_irq(ioc); | |
4397 | _base_disable_msix(ioc); | |
4398 | if (ioc->chip_phys) | |
4399 | iounmap(ioc->chip); | |
4400 | ioc->chip_phys = 0; | |
4401 | pci_release_selected_regions(ioc->pdev, ioc->bars); | |
4402 | pci_disable_pcie_error_reporting(pdev); | |
4403 | pci_disable_device(pdev); | |
4404 | return; | |
4405 | } | |
4406 | ||
4407 | /** | |
4408 | * mpt3sas_base_attach - attach controller instance | |
4409 | * @ioc: per adapter object | |
4410 | * | |
4411 | * Returns 0 for success, non-zero for failure. | |
4412 | */ | |
4413 | int | |
4414 | mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) | |
4415 | { | |
4416 | int r, i; | |
4417 | int cpu_id, last_cpu_id = 0; | |
4418 | ||
4419 | dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, | |
4420 | __func__)); | |
4421 | ||
4422 | /* setup cpu_msix_table */ | |
4423 | ioc->cpu_count = num_online_cpus(); | |
4424 | for_each_online_cpu(cpu_id) | |
4425 | last_cpu_id = cpu_id; | |
4426 | ioc->cpu_msix_table_sz = last_cpu_id + 1; | |
4427 | ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL); | |
4428 | ioc->reply_queue_count = 1; | |
4429 | if (!ioc->cpu_msix_table) { | |
4430 | dfailprintk(ioc, pr_info(MPT3SAS_FMT | |
4431 | "allocation for cpu_msix_table failed!!!\n", | |
4432 | ioc->name)); | |
4433 | r = -ENOMEM; | |
4434 | goto out_free_resources; | |
4435 | } | |
4436 | ||
4437 | r = mpt3sas_base_map_resources(ioc); | |
4438 | if (r) | |
4439 | goto out_free_resources; | |
4440 | ||
4441 | ||
4442 | pci_set_drvdata(ioc->pdev, ioc->shost); | |
4443 | r = _base_get_ioc_facts(ioc, CAN_SLEEP); | |
4444 | if (r) | |
4445 | goto out_free_resources; | |
4446 | ||
4447 | /* | |
4448 | * In SAS3.0, | |
4449 | * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and | |
4450 | * Target Status - all require the IEEE formated scatter gather | |
4451 | * elements. | |
4452 | */ | |
4453 | ||
4454 | ioc->build_sg_scmd = &_base_build_sg_scmd_ieee; | |
4455 | ioc->build_sg = &_base_build_sg_ieee; | |
4456 | ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee; | |
4457 | ioc->mpi25 = 1; | |
4458 | ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t); | |
4459 | ||
4460 | /* | |
4461 | * These function pointers for other requests that don't | |
4462 | * the require IEEE scatter gather elements. | |
4463 | * | |
4464 | * For example Configuration Pages and SAS IOUNIT Control don't. | |
4465 | */ | |
4466 | ioc->build_sg_mpi = &_base_build_sg; | |
4467 | ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge; | |
4468 | ||
4469 | r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET); | |
4470 | if (r) | |
4471 | goto out_free_resources; | |
4472 | ||
4473 | ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts, | |
4474 | sizeof(struct mpt3sas_port_facts), GFP_KERNEL); | |
4475 | if (!ioc->pfacts) { | |
4476 | r = -ENOMEM; | |
4477 | goto out_free_resources; | |
4478 | } | |
4479 | ||
4480 | for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) { | |
4481 | r = _base_get_port_facts(ioc, i, CAN_SLEEP); | |
4482 | if (r) | |
4483 | goto out_free_resources; | |
4484 | } | |
4485 | ||
4486 | r = _base_allocate_memory_pools(ioc, CAN_SLEEP); | |
4487 | if (r) | |
4488 | goto out_free_resources; | |
4489 | ||
4490 | init_waitqueue_head(&ioc->reset_wq); | |
4491 | ||
4492 | /* allocate memory pd handle bitmask list */ | |
4493 | ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8); | |
4494 | if (ioc->facts.MaxDevHandle % 8) | |
4495 | ioc->pd_handles_sz++; | |
4496 | ioc->pd_handles = kzalloc(ioc->pd_handles_sz, | |
4497 | GFP_KERNEL); | |
4498 | if (!ioc->pd_handles) { | |
4499 | r = -ENOMEM; | |
4500 | goto out_free_resources; | |
4501 | } | |
4502 | ioc->blocking_handles = kzalloc(ioc->pd_handles_sz, | |
4503 | GFP_KERNEL); | |
4504 | if (!ioc->blocking_handles) { | |
4505 | r = -ENOMEM; | |
4506 | goto out_free_resources; | |
4507 | } | |
4508 | ||
4509 | ioc->fwfault_debug = mpt3sas_fwfault_debug; | |
4510 | ||
4511 | /* base internal command bits */ | |
4512 | mutex_init(&ioc->base_cmds.mutex); | |
4513 | ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); | |
4514 | ioc->base_cmds.status = MPT3_CMD_NOT_USED; | |
4515 | ||
4516 | /* port_enable command bits */ | |
4517 | ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); | |
4518 | ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; | |
4519 | ||
4520 | /* transport internal command bits */ | |
4521 | ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); | |
4522 | ioc->transport_cmds.status = MPT3_CMD_NOT_USED; | |
4523 | mutex_init(&ioc->transport_cmds.mutex); | |
4524 | ||
4525 | /* scsih internal command bits */ | |
4526 | ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); | |
4527 | ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; | |
4528 | mutex_init(&ioc->scsih_cmds.mutex); | |
4529 | ||
4530 | /* task management internal command bits */ | |
4531 | ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); | |
4532 | ioc->tm_cmds.status = MPT3_CMD_NOT_USED; | |
4533 | mutex_init(&ioc->tm_cmds.mutex); | |
4534 | ||
4535 | /* config page internal command bits */ | |
4536 | ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); | |
4537 | ioc->config_cmds.status = MPT3_CMD_NOT_USED; | |
4538 | mutex_init(&ioc->config_cmds.mutex); | |
4539 | ||
4540 | /* ctl module internal command bits */ | |
4541 | ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); | |
4542 | ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL); | |
4543 | ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; | |
4544 | mutex_init(&ioc->ctl_cmds.mutex); | |
4545 | ||
4546 | if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply || | |
4547 | !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply || | |
4548 | !ioc->config_cmds.reply || !ioc->ctl_cmds.reply || | |
4549 | !ioc->ctl_cmds.sense) { | |
4550 | r = -ENOMEM; | |
4551 | goto out_free_resources; | |
4552 | } | |
4553 | ||
4554 | for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) | |
4555 | ioc->event_masks[i] = -1; | |
4556 | ||
4557 | /* here we enable the events we care about */ | |
4558 | _base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY); | |
4559 | _base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE); | |
4560 | _base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST); | |
4561 | _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE); | |
4562 | _base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE); | |
4563 | _base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST); | |
4564 | _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME); | |
4565 | _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK); | |
4566 | _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS); | |
4567 | _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED); | |
4568 | ||
4569 | r = _base_make_ioc_operational(ioc, CAN_SLEEP); | |
4570 | if (r) | |
4571 | goto out_free_resources; | |
4572 | ||
4573 | return 0; | |
4574 | ||
4575 | out_free_resources: | |
4576 | ||
4577 | ioc->remove_host = 1; | |
4578 | ||
4579 | mpt3sas_base_free_resources(ioc); | |
4580 | _base_release_memory_pools(ioc); | |
4581 | pci_set_drvdata(ioc->pdev, NULL); | |
4582 | kfree(ioc->cpu_msix_table); | |
4583 | kfree(ioc->pd_handles); | |
4584 | kfree(ioc->blocking_handles); | |
4585 | kfree(ioc->tm_cmds.reply); | |
4586 | kfree(ioc->transport_cmds.reply); | |
4587 | kfree(ioc->scsih_cmds.reply); | |
4588 | kfree(ioc->config_cmds.reply); | |
4589 | kfree(ioc->base_cmds.reply); | |
4590 | kfree(ioc->port_enable_cmds.reply); | |
4591 | kfree(ioc->ctl_cmds.reply); | |
4592 | kfree(ioc->ctl_cmds.sense); | |
4593 | kfree(ioc->pfacts); | |
4594 | ioc->ctl_cmds.reply = NULL; | |
4595 | ioc->base_cmds.reply = NULL; | |
4596 | ioc->tm_cmds.reply = NULL; | |
4597 | ioc->scsih_cmds.reply = NULL; | |
4598 | ioc->transport_cmds.reply = NULL; | |
4599 | ioc->config_cmds.reply = NULL; | |
4600 | ioc->pfacts = NULL; | |
4601 | return r; | |
4602 | } | |
4603 | ||
4604 | ||
4605 | /** | |
4606 | * mpt3sas_base_detach - remove controller instance | |
4607 | * @ioc: per adapter object | |
4608 | * | |
4609 | * Return nothing. | |
4610 | */ | |
4611 | void | |
4612 | mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc) | |
4613 | { | |
4614 | dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, | |
4615 | __func__)); | |
4616 | ||
4617 | mpt3sas_base_stop_watchdog(ioc); | |
4618 | mpt3sas_base_free_resources(ioc); | |
4619 | _base_release_memory_pools(ioc); | |
4620 | pci_set_drvdata(ioc->pdev, NULL); | |
4621 | kfree(ioc->cpu_msix_table); | |
4622 | kfree(ioc->pd_handles); | |
4623 | kfree(ioc->blocking_handles); | |
4624 | kfree(ioc->pfacts); | |
4625 | kfree(ioc->ctl_cmds.reply); | |
4626 | kfree(ioc->ctl_cmds.sense); | |
4627 | kfree(ioc->base_cmds.reply); | |
4628 | kfree(ioc->port_enable_cmds.reply); | |
4629 | kfree(ioc->tm_cmds.reply); | |
4630 | kfree(ioc->transport_cmds.reply); | |
4631 | kfree(ioc->scsih_cmds.reply); | |
4632 | kfree(ioc->config_cmds.reply); | |
4633 | } | |
4634 | ||
/**
 * _base_reset_handler - reset callback handler (for base)
 * @ioc: per adapter object
 * @reset_phase: phase
 *
 * The handler for doing any required cleanup or initialization.
 *
 * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
 * MPT3_IOC_DONE_RESET
 *
 * Return nothing.
 */
static void
_base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
{
	/* give the scsih and ctl layers their phase callbacks first */
	mpt3sas_scsih_reset_handler(ioc, reset_phase);
	mpt3sas_ctl_reset_handler(ioc, reset_phase);
	switch (reset_phase) {
	case MPT3_IOC_PRE_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
		"%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
		break;
	case MPT3_IOC_AFTER_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
		"%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
		/* Abort any internal commands caught in flight by the
		 * reset: set MPT3_CMD_RESET in the status BEFORE waking
		 * the waiter, then release the smid and complete().
		 */
		if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
			ioc->transport_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
			complete(&ioc->transport_cmds.done);
		}
		if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
			ioc->base_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
			complete(&ioc->base_cmds.done);
		}
		if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
			ioc->port_enable_failed = 1;
			ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
			if (ioc->is_driver_loading) {
				/* initial async scan is in progress: record
				 * the failure for scan_finished instead of
				 * completing a waiter
				 */
				ioc->start_scan_failed =
				    MPI2_IOCSTATUS_INTERNAL_ERROR;
				ioc->start_scan = 0;
				ioc->port_enable_cmds.status =
				    MPT3_CMD_NOT_USED;
			} else
				complete(&ioc->port_enable_cmds.done);
		}
		if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
			ioc->config_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
			/* USHRT_MAX marks the config smid as invalid */
			ioc->config_cmds.smid = USHRT_MAX;
			complete(&ioc->config_cmds.done);
		}
		break;
	case MPT3_IOC_DONE_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
		"%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
		break;
	}
}
4696 | ||
/**
 * _wait_for_commands_to_complete - reset controller
 * @ioc: Pointer to MPT_ADAPTER structure
 * @sleep_flag: CAN_SLEEP or NO_SLEEP
 *
 * This function waits (up to 10 seconds - matching the 10 * HZ timeout
 * below; the old comment's "3s" was wrong) for all pending commands to
 * complete prior to putting controller in reset.  It is a no-op unless
 * sleeping is allowed and the IOC is OPERATIONAL.
 */
static void
_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
{
	u32 ioc_state;
	unsigned long flags;
	u16 i;

	ioc->pending_io_count = 0;
	/* cannot sleep-wait in NO_SLEEP context */
	if (sleep_flag != CAN_SLEEP)
		return;

	/* nothing can be outstanding if the IOC is not OPERATIONAL */
	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
		return;

	/* pending command count: a tracker with cb_idx != 0xFF is in use */
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	for (i = 0; i < ioc->scsiio_depth; i++)
		if (ioc->scsi_lookup[i].cb_idx != 0xFF)
			ioc->pending_io_count++;
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	if (!ioc->pending_io_count)
		return;

	/* wait for pending commands to complete; the completion path is
	 * expected to decrement pending_io_count and wake reset_wq
	 */
	wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
}
4733 | ||
/**
 * mpt3sas_base_hard_reset_handler - reset controller
 * @ioc: Pointer to MPT_ADAPTER structure
 * @sleep_flag: CAN_SLEEP or NO_SLEEP
 * @type: FORCE_BIG_HAMMER or SOFT_RESET
 *
 * Serializes host resets: drains pending I/O, resets the IOC, and
 * re-runs the make-operational sequence, invoking the PRE/AFTER/DONE
 * reset callbacks around each stage.  Concurrent callers block until
 * the in-progress reset finishes and return its status.
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
	enum reset_type type)
{
	int r;
	unsigned long flags;
	u32 ioc_state;
	u8 is_fault = 0, is_trigger = 0;

	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
	    __func__));

	/* PCI error recovery owns the device; pretend success and bail */
	if (ioc->pci_error_recovery) {
		pr_err(MPT3SAS_FMT "%s: pci error recovery reset\n",
		    ioc->name, __func__);
		r = 0;
		goto out_unlocked;
	}

	if (mpt3sas_fwfault_debug)
		mpt3sas_halt_firmware(ioc);

	/* TODO - What we really should be doing is pulling
	 * out all the code associated with NO_SLEEP; its never used.
	 * That is legacy code from mpt fusion driver, ported over.
	 * I will leave this BUG_ON here for now till its been resolved.
	 */
	BUG_ON(sleep_flag == NO_SLEEP);

	/* wait for an active reset in progress to complete; poll
	 * shost_recovery once a second and return that reset's status
	 */
	if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
		do {
			ssleep(1);
		} while (ioc->shost_recovery == 1);
		dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
		    __func__));
		return ioc->ioc_reset_in_progress_status;
	}

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->shost_recovery = 1;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

	/* if a trace diag buffer is registered but not yet released,
	 * remember to fire a master trigger after a successful reset;
	 * note whether the IOC was in FAULT when we started
	 */
	if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED) &&
	    (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_RELEASED))) {
		is_trigger = 1;
		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
			is_fault = 1;
	}
	_base_reset_handler(ioc, MPT3_IOC_PRE_RESET);
	_wait_for_commands_to_complete(ioc, sleep_flag);
	_base_mask_interrupts(ioc);
	r = _base_make_ioc_ready(ioc, sleep_flag, type);
	if (r)
		goto out;
	_base_reset_handler(ioc, MPT3_IOC_AFTER_RESET);

	/* If this hard reset is called while port enable is active, then
	 * there is no reason to call make_ioc_operational
	 */
	if (ioc->is_driver_loading && ioc->port_enable_failed) {
		ioc->remove_host = 1;
		r = -EFAULT;
		goto out;
	}
	r = _base_get_ioc_facts(ioc, CAN_SLEEP);
	if (r)
		goto out;
	r = _base_make_ioc_operational(ioc, sleep_flag);
	if (!r)
		_base_reset_handler(ioc, MPT3_IOC_DONE_RESET);

 out:
	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: %s\n",
	    ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));

	/* publish the result for any waiters polling shost_recovery */
	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->ioc_reset_in_progress_status = r;
	ioc->shost_recovery = 0;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->ioc_reset_count++;
	mutex_unlock(&ioc->reset_in_progress_mutex);

 out_unlocked:
	if ((r == 0) && is_trigger) {
		if (is_fault)
			mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
		else
			mpt3sas_trigger_master(ioc,
			    MASTER_TRIGGER_ADAPTER_RESET);
	}
	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
	    __func__));
	return r;
}