Add missing newlines to some uses of dev_<level> messages
drivers/scsi/lpfc/lpfc_sli.c
dea3101e 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
c44ce173 3 * Fibre Channel Host Bus Adapters. *
9413afff 4 * Copyright (C) 2004-2007 Emulex. All rights reserved. *
c44ce173 5 * EMULEX and SLI are trademarks of Emulex. *
dea3101e 6 * www.emulex.com *
c44ce173 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
dea3101e 8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
dea3101e 20 *******************************************************************/
21
dea3101e 22#include <linux/blkdev.h>
23#include <linux/pci.h>
24#include <linux/interrupt.h>
25#include <linux/delay.h>
26
91886523 27#include <scsi/scsi.h>
dea3101e 28#include <scsi/scsi_cmnd.h>
29#include <scsi/scsi_device.h>
30#include <scsi/scsi_host.h>
f888ba3c 31#include <scsi/scsi_transport_fc.h>
dea3101e 32
33#include "lpfc_hw.h"
34#include "lpfc_sli.h"
35#include "lpfc_disc.h"
36#include "lpfc_scsi.h"
37#include "lpfc.h"
38#include "lpfc_crtn.h"
39#include "lpfc_logmsg.h"
40#include "lpfc_compat.h"
858c9f6c 41#include "lpfc_debugfs.h"
dea3101e 42
43/*
44 * Define macro to log: Mailbox command x%x cannot issue Data
45 * This allows multiple uses of lpfc_msgBlk0311
46 * w/o perturbing log msg utility.
47 */
92d7f7b0 48#define LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) \
dea3101e 49 lpfc_printf_log(phba, \
50 KERN_INFO, \
51 LOG_MBOX | LOG_SLI, \
e8b62011 52 "(%d):0311 Mailbox command x%x cannot " \
92d7f7b0 53 "issue Data: x%x x%x x%x\n", \
54 pmbox->vport ? pmbox->vport->vpi : 0, \
55 pmbox->mb.mbxCommand, \
2e0fef85 56 phba->pport->port_state, \
dea3101e 57 psli->sli_flag, \
2e0fef85 58 flag)
dea3101e 59
60
61/* There are only four IOCB completion types. */
62typedef enum _lpfc_iocb_type {
63 LPFC_UNKNOWN_IOCB,
64 LPFC_UNSOL_IOCB,
65 LPFC_SOL_IOCB,
66 LPFC_ABORT_IOCB
67} lpfc_iocb_type;
68
69 /* SLI-2/SLI-3 provide different sized iocbs. Given a pointer
70 * to the start of the ring, and the slot number of the
71 * desired iocb entry, calc a pointer to that entry.
72 */
73static inline IOCB_t *
74lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
75{
76 return (IOCB_t *) (((char *) pring->cmdringaddr) +
77 pring->cmdidx * phba->iocb_cmd_size);
78}
79
80static inline IOCB_t *
81lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
82{
83 return (IOCB_t *) (((char *) pring->rspringaddr) +
84 pring->rspidx * phba->iocb_rsp_size);
85}
86
87static struct lpfc_iocbq *
88__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
89{
90 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
91 struct lpfc_iocbq * iocbq = NULL;
92
93 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
94 return iocbq;
95}
96
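/* Allocate an iocbq from the driver's free list; this wrapper takes the
 * hbalock around __lpfc_sli_get_iocbq and returns NULL if the list is empty.
 */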
97struct lpfc_iocbq *
98lpfc_sli_get_iocbq(struct lpfc_hba *phba)
99{
100 struct lpfc_iocbq * iocbq = NULL;
101 unsigned long iflags;
102
103 spin_lock_irqsave(&phba->hbalock, iflags);
104 iocbq = __lpfc_sli_get_iocbq(phba);
105 spin_unlock_irqrestore(&phba->hbalock, iflags);
106 return iocbq;
107}
108
604a3e30 109void
2e0fef85 110__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
604a3e30 111{
2e0fef85 112 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
113
114 /*
115 * Clean all volatile data fields, preserve iotag and node struct.
116 */
117 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
118 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
119}
120
121void
122lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
123{
124 unsigned long iflags;
125
126 /*
127 * Clean all volatile data fields, preserve iotag and node struct.
128 */
129 spin_lock_irqsave(&phba->hbalock, iflags);
130 __lpfc_sli_release_iocbq(phba, iocbq);
131 spin_unlock_irqrestore(&phba->hbalock, iflags);
132}
133
dea3101e 134/*
135 * Translate the iocb command to an iocb command type used to decide the final
136 * disposition of each completed IOCB.
137 */
138static lpfc_iocb_type
139lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
140{
141 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
142
143 if (iocb_cmnd > CMD_MAX_IOCB_CMD)
144 return 0;
145
146 switch (iocb_cmnd) {
147 case CMD_XMIT_SEQUENCE_CR:
148 case CMD_XMIT_SEQUENCE_CX:
149 case CMD_XMIT_BCAST_CN:
150 case CMD_XMIT_BCAST_CX:
151 case CMD_ELS_REQUEST_CR:
152 case CMD_ELS_REQUEST_CX:
153 case CMD_CREATE_XRI_CR:
154 case CMD_CREATE_XRI_CX:
155 case CMD_GET_RPI_CN:
156 case CMD_XMIT_ELS_RSP_CX:
157 case CMD_GET_RPI_CR:
158 case CMD_FCP_IWRITE_CR:
159 case CMD_FCP_IWRITE_CX:
160 case CMD_FCP_IREAD_CR:
161 case CMD_FCP_IREAD_CX:
162 case CMD_FCP_ICMND_CR:
163 case CMD_FCP_ICMND_CX:
164 case CMD_FCP_TSEND_CX:
165 case CMD_FCP_TRSP_CX:
166 case CMD_FCP_TRECEIVE_CX:
167 case CMD_FCP_AUTO_TRSP_CX:
dea3101e 168 case CMD_ADAPTER_MSG:
169 case CMD_ADAPTER_DUMP:
170 case CMD_XMIT_SEQUENCE64_CR:
171 case CMD_XMIT_SEQUENCE64_CX:
172 case CMD_XMIT_BCAST64_CN:
173 case CMD_XMIT_BCAST64_CX:
174 case CMD_ELS_REQUEST64_CR:
175 case CMD_ELS_REQUEST64_CX:
176 case CMD_FCP_IWRITE64_CR:
177 case CMD_FCP_IWRITE64_CX:
178 case CMD_FCP_IREAD64_CR:
179 case CMD_FCP_IREAD64_CX:
180 case CMD_FCP_ICMND64_CR:
181 case CMD_FCP_ICMND64_CX:
182 case CMD_FCP_TSEND64_CX:
183 case CMD_FCP_TRSP64_CX:
184 case CMD_FCP_TRECEIVE64_CX:
dea3101e 185 case CMD_GEN_REQUEST64_CR:
186 case CMD_GEN_REQUEST64_CX:
187 case CMD_XMIT_ELS_RSP64_CX:
188 type = LPFC_SOL_IOCB;
189 break;
190 case CMD_ABORT_XRI_CN:
191 case CMD_ABORT_XRI_CX:
192 case CMD_CLOSE_XRI_CN:
193 case CMD_CLOSE_XRI_CX:
194 case CMD_XRI_ABORTED_CX:
195 case CMD_ABORT_MXRI64_CN:
196 type = LPFC_ABORT_IOCB;
197 break;
198 case CMD_RCV_SEQUENCE_CX:
199 case CMD_RCV_ELS_REQ_CX:
200 case CMD_RCV_SEQUENCE64_CX:
201 case CMD_RCV_ELS_REQ64_CX:
202 case CMD_IOCB_RCV_SEQ64_CX:
203 case CMD_IOCB_RCV_ELS64_CX:
204 case CMD_IOCB_RCV_CONT64_CX:
dea3101e 205 type = LPFC_UNSOL_IOCB;
206 break;
207 default:
208 type = LPFC_UNKNOWN_IOCB;
209 break;
210 }
211
212 return type;
213}
214
215static int
ed957684 216lpfc_sli_ring_map(struct lpfc_hba *phba)
dea3101e 217{
218 struct lpfc_sli *psli = &phba->sli;
219 LPFC_MBOXQ_t *pmb;
220 MAILBOX_t *pmbox;
221 int i, rc, ret = 0;
dea3101e 222
223 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
224 if (!pmb)
225 return -ENOMEM;
226 pmbox = &pmb->mb;
227 phba->link_state = LPFC_INIT_MBX_CMDS;
dea3101e 228 for (i = 0; i < psli->num_rings; i++) {
dea3101e 229 lpfc_config_ring(phba, i, pmb);
230 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
231 if (rc != MBX_SUCCESS) {
92d7f7b0 232 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 233 "0446 Adapter failed to init (%d), "
dea3101e 234 "mbxCmd x%x CFG_RING, mbxStatus x%x, "
235 "ring %d\n",
236 rc, pmbox->mbxCommand,
237 pmbox->mbxStatus, i);
2e0fef85 238 phba->link_state = LPFC_HBA_ERROR;
239 ret = -ENXIO;
240 break;
dea3101e 241 }
242 }
243 mempool_free(pmb, phba->mbox_mem_pool);
244 return ret;
dea3101e 245}
246
247static int
248lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
249 struct lpfc_iocbq *piocb)
dea3101e 250{
dea3101e 251 list_add_tail(&piocb->list, &pring->txcmplq);
252 pring->txcmplq_cnt++;
253 if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
254 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
255 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
256 if (!piocb->vport)
257 BUG();
258 else
259 mod_timer(&piocb->vport->els_tmofunc,
260 jiffies + HZ * (phba->fc_ratov << 1));
261 }
262
dea3101e 263
2e0fef85 264 return 0;
dea3101e 265}
266
267static struct lpfc_iocbq *
2e0fef85 268lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea3101e 269{
dea3101e 270 struct lpfc_iocbq *cmd_iocb;
271
272 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
273 if (cmd_iocb != NULL)
dea3101e 274 pring->txq_cnt--;
2e0fef85 275 return cmd_iocb;
dea3101e 276}
277
278static IOCB_t *
279lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
280{
281 struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
282 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
283 &phba->slim2p->mbx.us.s2.port[pring->ringno];
dea3101e 284 uint32_t max_cmd_idx = pring->numCiocb;
dea3101e 285
286 if ((pring->next_cmdidx == pring->cmdidx) &&
287 (++pring->next_cmdidx >= max_cmd_idx))
288 pring->next_cmdidx = 0;
289
290 if (unlikely(pring->local_getidx == pring->next_cmdidx)) {
291
292 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
293
294 if (unlikely(pring->local_getidx >= max_cmd_idx)) {
295 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 296 "0315 Ring %d issue: portCmdGet %d "
dea3101e 297 "is bigger than cmd ring %d\n",
e8b62011 298 pring->ringno,
dea3101e 299 pring->local_getidx, max_cmd_idx);
300
2e0fef85 301 phba->link_state = LPFC_HBA_ERROR;
dea3101e 302 /*
303 * All error attention handlers are posted to
304 * worker thread
305 */
306 phba->work_ha |= HA_ERATT;
307 phba->work_hs = HS_FFER3;
308
309 /* hbalock should already be held */
dea3101e 310 if (phba->work_wait)
92d7f7b0 311 lpfc_worker_wake_up(phba);
dea3101e 312
313 return NULL;
314 }
315
316 if (pring->local_getidx == pring->next_cmdidx)
317 return NULL;
318 }
319
ed957684 320 return lpfc_cmd_iocb(phba, pring);
dea3101e 321}
322
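/* Assign the next free iotag to the iocbq and record it in the iocbq_lookup
 * array so responses can be matched back to their commands. The lookup array
 * is grown by LPFC_IOCBQ_LOOKUP_INCREMENT (allocated outside the lock) when
 * it fills up; returns 0 if no iotag could be allocated.
 */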
604a3e30 323uint16_t
2e0fef85 324lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
dea3101e 325{
326 struct lpfc_iocbq **new_arr;
327 struct lpfc_iocbq **old_arr;
328 size_t new_len;
329 struct lpfc_sli *psli = &phba->sli;
330 uint16_t iotag;
dea3101e 331
2e0fef85 332 spin_lock_irq(&phba->hbalock);
333 iotag = psli->last_iotag;
334 if(++iotag < psli->iocbq_lookup_len) {
335 psli->last_iotag = iotag;
336 psli->iocbq_lookup[iotag] = iocbq;
2e0fef85 337 spin_unlock_irq(&phba->hbalock);
338 iocbq->iotag = iotag;
339 return iotag;
2e0fef85 340 } else if (psli->iocbq_lookup_len < (0xffff
341 - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
342 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
343 spin_unlock_irq(&phba->hbalock);
344 new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
345 GFP_KERNEL);
346 if (new_arr) {
2e0fef85 347 spin_lock_irq(&phba->hbalock);
348 old_arr = psli->iocbq_lookup;
349 if (new_len <= psli->iocbq_lookup_len) {
 350 /* highly improbable case */
351 kfree(new_arr);
352 iotag = psli->last_iotag;
353 if(++iotag < psli->iocbq_lookup_len) {
354 psli->last_iotag = iotag;
355 psli->iocbq_lookup[iotag] = iocbq;
2e0fef85 356 spin_unlock_irq(&phba->hbalock);
357 iocbq->iotag = iotag;
358 return iotag;
359 }
2e0fef85 360 spin_unlock_irq(&phba->hbalock);
361 return 0;
362 }
363 if (psli->iocbq_lookup)
364 memcpy(new_arr, old_arr,
365 ((psli->last_iotag + 1) *
311464ec 366 sizeof (struct lpfc_iocbq *)));
367 psli->iocbq_lookup = new_arr;
368 psli->iocbq_lookup_len = new_len;
369 psli->last_iotag = iotag;
370 psli->iocbq_lookup[iotag] = iocbq;
2e0fef85 371 spin_unlock_irq(&phba->hbalock);
372 iocbq->iotag = iotag;
373 kfree(old_arr);
374 return iotag;
375 }
8f6d98d2 376 } else
2e0fef85 377 spin_unlock_irq(&phba->hbalock);
dea3101e 378
604a3e30 379 lpfc_printf_log(phba, KERN_ERR,LOG_SLI,
380 "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
381 psli->last_iotag);
dea3101e 382
604a3e30 383 return 0;
dea3101e 384}
385
386static void
387lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
388 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
389{
390 /*
604a3e30 391 * Set up an iotag
dea3101e 392 */
604a3e30 393 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
dea3101e 394
395 if (pring->ringno == LPFC_ELS_RING) {
396 lpfc_debugfs_slow_ring_trc(phba,
397 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
398 *(((uint32_t *) &nextiocb->iocb) + 4),
399 *(((uint32_t *) &nextiocb->iocb) + 6),
400 *(((uint32_t *) &nextiocb->iocb) + 7));
401 }
402
dea3101e 403 /*
404 * Issue iocb command to adapter
405 */
92d7f7b0 406 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
dea3101e 407 wmb();
408 pring->stats.iocb_cmd++;
409
410 /*
411 * If there is no completion routine to call, we can release the
412 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
413 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
414 */
415 if (nextiocb->iocb_cmpl)
416 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
604a3e30 417 else
2e0fef85 418 __lpfc_sli_release_iocbq(phba, nextiocb);
dea3101e 419
420 /*
421 * Let the HBA know what IOCB slot will be the next one the
422 * driver will put a command into.
423 */
424 pring->cmdidx = pring->next_cmdidx;
ed957684 425 writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
dea3101e 426}
427
428static void
2e0fef85 429lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea3101e 430{
431 int ringno = pring->ringno;
432
433 pring->flag |= LPFC_CALL_RING_AVAILABLE;
434
435 wmb();
436
437 /*
438 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
439 * The HBA will tell us when an IOCB entry is available.
440 */
441 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
442 readl(phba->CAregaddr); /* flush */
443
444 pring->stats.iocb_cmd_full++;
445}
446
447static void
2e0fef85 448lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea3101e 449{
450 int ringno = pring->ringno;
451
452 /*
453 * Tell the HBA that there is work to do in this ring.
454 */
455 wmb();
456 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
457 readl(phba->CAregaddr); /* flush */
458}
459
460static void
2e0fef85 461lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea3101e 462{
463 IOCB_t *iocb;
464 struct lpfc_iocbq *nextiocb;
465
466 /*
467 * Check to see if:
468 * (a) there is anything on the txq to send
469 * (b) link is up
470 * (c) link attention events can be processed (fcp ring only)
471 * (d) IOCB processing is not blocked by the outstanding mbox command.
472 */
473 if (pring->txq_cnt &&
2e0fef85 474 lpfc_is_link_up(phba) &&
dea3101e 475 (pring->ringno != phba->sli.fcp_ring ||
476 phba->sli.sli_flag & LPFC_PROCESS_LA) &&
477 !(pring->flag & LPFC_STOP_IOCB_MBX)) {
478
479 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
480 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
481 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
482
483 if (iocb)
484 lpfc_sli_update_ring(phba, pring);
485 else
486 lpfc_sli_update_full_ring(phba, pring);
487 }
488
489 return;
490}
491
492/* lpfc_sli_turn_on_ring is only called by lpfc_sli_handle_mb_event below */
493static void
2e0fef85 494lpfc_sli_turn_on_ring(struct lpfc_hba *phba, int ringno)
dea3101e 495{
496 struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
497 &phba->slim2p->mbx.us.s3_pgp.port[ringno] :
498 &phba->slim2p->mbx.us.s2.port[ringno];
2e0fef85 499 unsigned long iflags;
dea3101e 500
501 /* If the ring is active, flag it */
2e0fef85 502 spin_lock_irqsave(&phba->hbalock, iflags);
dea3101e 503 if (phba->sli.ring[ringno].cmdringaddr) {
504 if (phba->sli.ring[ringno].flag & LPFC_STOP_IOCB_MBX) {
505 phba->sli.ring[ringno].flag &= ~LPFC_STOP_IOCB_MBX;
506 /*
507 * Force update of the local copy of cmdGetInx
508 */
509 phba->sli.ring[ringno].local_getidx
510 = le32_to_cpu(pgp->cmdGetInx);
dea3101e 511 lpfc_sli_resume_iocb(phba, &phba->sli.ring[ringno]);
dea3101e 512 }
513 }
2e0fef85 514 spin_unlock_irqrestore(&phba->hbalock, iflags);
dea3101e 515}
516
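/* Return a pointer to the next free entry in host buffer queue @hbqno, or
 * NULL if the HBQ is full or the adapter's get index is out of range.
 */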
517struct lpfc_hbq_entry *
518lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
519{
520 struct hbq_s *hbqp = &phba->hbqs[hbqno];
521
522 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
523 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
524 hbqp->next_hbqPutIdx = 0;
525
526 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
92d7f7b0 527 uint32_t raw_index = phba->hbq_get[hbqno];
528 uint32_t getidx = le32_to_cpu(raw_index);
529
530 hbqp->local_hbqGetIdx = getidx;
531
532 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
533 lpfc_printf_log(phba, KERN_ERR,
92d7f7b0 534 LOG_SLI | LOG_VPORT,
e8b62011 535 "1802 HBQ %d: local_hbqGetIdx "
ed957684 536 "%u is > hbqp->entry_count %u\n",
e8b62011 537 hbqno, hbqp->local_hbqGetIdx,
538 hbqp->entry_count);
539
540 phba->link_state = LPFC_HBA_ERROR;
541 return NULL;
542 }
543
544 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
545 return NULL;
546 }
547
548 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
549 hbqp->hbqPutIdx;
550}
551
552void
553lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
554{
555 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
556 struct hbq_dmabuf *hbq_buf;
51ef4c26 557 int i, hbq_count;
ed957684 558
51ef4c26 559 hbq_count = lpfc_sli_hbq_count();
ed957684 560 /* Return all memory used by all HBQs */
561 for (i = 0; i < hbq_count; ++i) {
562 list_for_each_entry_safe(dmabuf, next_dmabuf,
563 &phba->hbqs[i].hbq_buffer_list, list) {
564 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
565 list_del(&hbq_buf->dbuf.list);
566 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
567 }
ed957684 568 }
569}
570
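/* Post an HBQ buffer to the adapter: fill the next HBQ slot with the buffer's
 * DMA address and tag, bump the put index in SLIM, and park the buffer on the
 * hbq_buffer_list until the firmware hands it back.
 */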
51ef4c26 571static struct lpfc_hbq_entry *
ed957684 572lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
92d7f7b0 573 struct hbq_dmabuf *hbq_buf)
574{
575 struct lpfc_hbq_entry *hbqe;
92d7f7b0 576 dma_addr_t physaddr = hbq_buf->dbuf.phys;
577
578 /* Get next HBQ entry slot to use */
579 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
580 if (hbqe) {
581 struct hbq_s *hbqp = &phba->hbqs[hbqno];
582
583 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
584 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
51ef4c26 585 hbqe->bde.tus.f.bdeSize = hbq_buf->size;
ed957684 586 hbqe->bde.tus.f.bdeFlags = 0;
587 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
588 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
589 /* Sync SLIM */
590 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
591 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
92d7f7b0 592 /* flush */
ed957684 593 readl(phba->hbq_put + hbqno);
51ef4c26 594 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
ed957684 595 }
51ef4c26 596 return hbqe;
597}
598
599static struct lpfc_hbq_init lpfc_els_hbq = {
600 .rn = 1,
601 .entry_count = 200,
602 .mask_count = 0,
603 .profile = 0,
51ef4c26 604 .ring_mask = (1 << LPFC_ELS_RING),
605 .buffer_count = 0,
606 .init_count = 20,
607 .add_count = 5,
608};
ed957684 609
610static struct lpfc_hbq_init lpfc_extra_hbq = {
611 .rn = 1,
612 .entry_count = 200,
613 .mask_count = 0,
614 .profile = 0,
615 .ring_mask = (1 << LPFC_EXTRA_RING),
616 .buffer_count = 0,
617 .init_count = 0,
618 .add_count = 5,
619};
620
78b2d852 621struct lpfc_hbq_init *lpfc_hbq_defs[] = {
92d7f7b0 622 &lpfc_els_hbq,
51ef4c26 623 &lpfc_extra_hbq,
92d7f7b0 624};
ed957684 625
311464ec 626static int
92d7f7b0 627lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
ed957684 628{
629 uint32_t i, start, end;
630 struct hbq_dmabuf *hbq_buffer;
ed957684 631
632 if (!phba->hbqs[hbqno].hbq_alloc_buffer) {
633 return 0;
634 }
635
636 start = lpfc_hbq_defs[hbqno]->buffer_count;
637 end = count + lpfc_hbq_defs[hbqno]->buffer_count;
638 if (end > lpfc_hbq_defs[hbqno]->entry_count) {
639 end = lpfc_hbq_defs[hbqno]->entry_count;
640 }
641
642 /* Populate HBQ entries */
92d7f7b0 643 for (i = start; i < end; i++) {
51ef4c26 644 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
645 if (!hbq_buffer)
646 return 1;
92d7f7b0 647 hbq_buffer->tag = (i | (hbqno << 16));
648 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
649 lpfc_hbq_defs[hbqno]->buffer_count++;
650 else
651 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
652 }
653 return 0;
654}
655
656int
657lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
ed957684 658{
659 return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
660 lpfc_hbq_defs[qno]->add_count));
661}
ed957684 662
663int
664lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
665{
666 return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
667 lpfc_hbq_defs[qno]->init_count));
668}
669
670struct hbq_dmabuf *
671lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
ed957684 672{
673 struct lpfc_dmabuf *d_buf;
674 struct hbq_dmabuf *hbq_buf;
675 uint32_t hbqno;
676
677 hbqno = tag >> 16;
a0a74e45 678 if (hbqno >= LPFC_MAX_HBQS)
51ef4c26 679 return NULL;
ed957684 680
51ef4c26 681 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
92d7f7b0 682 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
51ef4c26 683 if (hbq_buf->tag == tag) {
92d7f7b0 684 return hbq_buf;
685 }
686 }
92d7f7b0 687 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
688 "1803 Bad hbq tag. Data: x%x x%x\n",
689 tag, lpfc_hbq_defs[tag >> 16]->buffer_count);
92d7f7b0 690 return NULL;
691}
692
693void
51ef4c26 694lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
695{
696 uint32_t hbqno;
697
698 if (hbq_buffer) {
699 hbqno = hbq_buffer->tag >> 16;
700 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
701 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
702 }
703 }
704}
705
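/* Validate a mailbox command code; any command not in the list below is
 * mapped to MBX_SHUTDOWN so the caller treats its completion as fatal.
 */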
dea3101e 706static int
707lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
708{
709 uint8_t ret;
710
711 switch (mbxCommand) {
712 case MBX_LOAD_SM:
713 case MBX_READ_NV:
714 case MBX_WRITE_NV:
715 case MBX_RUN_BIU_DIAG:
716 case MBX_INIT_LINK:
717 case MBX_DOWN_LINK:
718 case MBX_CONFIG_LINK:
719 case MBX_CONFIG_RING:
720 case MBX_RESET_RING:
721 case MBX_READ_CONFIG:
722 case MBX_READ_RCONFIG:
723 case MBX_READ_SPARM:
724 case MBX_READ_STATUS:
725 case MBX_READ_RPI:
726 case MBX_READ_XRI:
727 case MBX_READ_REV:
728 case MBX_READ_LNK_STAT:
729 case MBX_REG_LOGIN:
730 case MBX_UNREG_LOGIN:
731 case MBX_READ_LA:
732 case MBX_CLEAR_LA:
733 case MBX_DUMP_MEMORY:
734 case MBX_DUMP_CONTEXT:
735 case MBX_RUN_DIAGS:
736 case MBX_RESTART:
737 case MBX_UPDATE_CFG:
738 case MBX_DOWN_LOAD:
739 case MBX_DEL_LD_ENTRY:
740 case MBX_RUN_PROGRAM:
741 case MBX_SET_MASK:
742 case MBX_SET_SLIM:
743 case MBX_UNREG_D_ID:
41415862 744 case MBX_KILL_BOARD:
dea3101e 745 case MBX_CONFIG_FARP:
41415862 746 case MBX_BEACON:
dea3101e 747 case MBX_LOAD_AREA:
748 case MBX_RUN_BIU_DIAG64:
749 case MBX_CONFIG_PORT:
750 case MBX_READ_SPARM64:
751 case MBX_READ_RPI64:
752 case MBX_REG_LOGIN64:
753 case MBX_READ_LA64:
754 case MBX_FLASH_WR_ULA:
755 case MBX_SET_DEBUG:
756 case MBX_LOAD_EXP_ROM:
757 case MBX_REG_VPI:
758 case MBX_UNREG_VPI:
858c9f6c 759 case MBX_HEARTBEAT:
dea3101e 760 ret = mbxCommand;
761 break;
762 default:
763 ret = MBX_SHUTDOWN;
764 break;
765 }
2e0fef85 766 return ret;
dea3101e 767}
768static void
2e0fef85 769lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
dea3101e 770{
771 wait_queue_head_t *pdone_q;
858c9f6c 772 unsigned long drvr_flag;
dea3101e 773
774 /*
775 * If pdone_q is empty, the driver thread gave up waiting and
776 * continued running.
777 */
7054a606 778 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
858c9f6c 779 spin_lock_irqsave(&phba->hbalock, drvr_flag);
dea3101e 780 pdone_q = (wait_queue_head_t *) pmboxq->context1;
781 if (pdone_q)
782 wake_up_interruptible(pdone_q);
858c9f6c 783 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 784 return;
785}
786
787void
2e0fef85 788lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
dea3101e 789{
790 struct lpfc_dmabuf *mp;
791 uint16_t rpi;
792 int rc;
793
dea3101e 794 mp = (struct lpfc_dmabuf *) (pmb->context1);
7054a606 795
dea3101e 796 if (mp) {
797 lpfc_mbuf_free(phba, mp->virt, mp->phys);
798 kfree(mp);
799 }
800
801 /*
802 * If a REG_LOGIN succeeded after node is destroyed or node
803 * is in re-discovery driver need to cleanup the RPI.
804 */
805 if (!(phba->pport->load_flag & FC_UNLOADING) &&
806 pmb->mb.mbxCommand == MBX_REG_LOGIN64 &&
807 !pmb->mb.mbxStatus) {
808
809 rpi = pmb->mb.un.varWords[0];
810 lpfc_unreg_login(phba, pmb->mb.un.varRegLogin.vpi, rpi, pmb);
811 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
812 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
813 if (rc != MBX_NOT_FINISHED)
814 return;
815 }
816
2e0fef85 817 mempool_free(pmb, phba->mbox_mem_pool);
dea3101e 818 return;
819}
820
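/* Walk the list of completed mailbox commands (mboxq_cmpl), log each one,
 * retry commands that failed with MBXERR_NO_RESOURCES, and invoke each
 * command's mbox_cmpl callback.
 */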
821int
2e0fef85 822lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
dea3101e 823{
92d7f7b0 824 MAILBOX_t *pmbox;
dea3101e 825 LPFC_MBOXQ_t *pmb;
826 int rc;
827 LIST_HEAD(cmplq);
dea3101e 828
829 phba->sli.slistat.mbox_event++;
830
 831 /* Get all completed mailbox buffers into the cmplq */
832 spin_lock_irq(&phba->hbalock);
833 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
834 spin_unlock_irq(&phba->hbalock);
dea3101e 835
836 /* Get a Mailbox buffer to setup mailbox commands for callback */
837 do {
838 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
839 if (pmb == NULL)
840 break;
2e0fef85 841
92d7f7b0 842 pmbox = &pmb->mb;
dea3101e 843
844 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
845 if (pmb->vport) {
846 lpfc_debugfs_disc_trc(pmb->vport,
847 LPFC_DISC_TRC_MBOX_VPORT,
848 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
849 (uint32_t)pmbox->mbxCommand,
850 pmbox->un.varWords[0],
851 pmbox->un.varWords[1]);
852 }
853 else {
854 lpfc_debugfs_disc_trc(phba->pport,
855 LPFC_DISC_TRC_MBOX,
856 "MBOX cmpl: cmd:x%x mb:x%x x%x",
857 (uint32_t)pmbox->mbxCommand,
858 pmbox->un.varWords[0],
859 pmbox->un.varWords[1]);
860 }
861 }
862
dea3101e 863 /*
 864 * It is a fatal error if an unknown mbox command completion occurs.
865 */
866 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
867 MBX_SHUTDOWN) {
dea3101e 868 /* Unknown mailbox command compl */
92d7f7b0 869 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
e8b62011 870 "(%d):0323 Unknown Mailbox command "
92d7f7b0 871 "%x Cmpl\n",
872 pmb->vport ? pmb->vport->vpi : 0,
873 pmbox->mbxCommand);
2e0fef85 874 phba->link_state = LPFC_HBA_ERROR;
dea3101e 875 phba->work_hs = HS_FFER3;
876 lpfc_handle_eratt(phba);
92d7f7b0 877 continue;
dea3101e 878 }
879
dea3101e 880 if (pmbox->mbxStatus) {
881 phba->sli.slistat.mbox_stat_err++;
882 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
883 /* Mbox cmd cmpl error - RETRYing */
884 lpfc_printf_log(phba, KERN_INFO,
885 LOG_MBOX | LOG_SLI,
e8b62011 886 "(%d):0305 Mbox cmd cmpl "
887 "error - RETRYing Data: x%x "
888 "x%x x%x x%x\n",
889 pmb->vport ? pmb->vport->vpi :0,
890 pmbox->mbxCommand,
891 pmbox->mbxStatus,
892 pmbox->un.varWords[0],
893 pmb->vport->port_state);
dea3101e 894 pmbox->mbxStatus = 0;
895 pmbox->mbxOwner = OWN_HOST;
2e0fef85 896 spin_lock_irq(&phba->hbalock);
dea3101e 897 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 898 spin_unlock_irq(&phba->hbalock);
dea3101e 899 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
900 if (rc == MBX_SUCCESS)
92d7f7b0 901 continue;
dea3101e 902 }
903 }
904
905 /* Mailbox cmd <cmd> Cmpl <cmpl> */
92d7f7b0 906 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
e8b62011 907 "(%d):0307 Mailbox cmd x%x Cmpl x%p "
dea3101e 908 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
92d7f7b0 909 pmb->vport ? pmb->vport->vpi : 0,
dea3101e 910 pmbox->mbxCommand,
911 pmb->mbox_cmpl,
912 *((uint32_t *) pmbox),
913 pmbox->un.varWords[0],
914 pmbox->un.varWords[1],
915 pmbox->un.varWords[2],
916 pmbox->un.varWords[3],
917 pmbox->un.varWords[4],
918 pmbox->un.varWords[5],
919 pmbox->un.varWords[6],
920 pmbox->un.varWords[7]);
921
92d7f7b0 922 if (pmb->mbox_cmpl)
dea3101e 923 pmb->mbox_cmpl(phba,pmb);
924 } while (1);
925 return 0;
926}
dea3101e 927
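/* Swap the DMA buffer of a posted HBQ entry with a freshly allocated one so
 * the received buffer can be handed up the stack while the HBQ stays
 * populated; if no replacement can be allocated, the original is returned.
 */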
928static struct lpfc_dmabuf *
929lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
930{
931 struct hbq_dmabuf *hbq_entry, *new_hbq_entry;
932 uint32_t hbqno;
933 void *virt; /* virtual address ptr */
934 dma_addr_t phys; /* mapped address */
dea3101e 935
936 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
937 if (hbq_entry == NULL)
938 return NULL;
939 list_del(&hbq_entry->dbuf.list);
940
941 hbqno = tag >> 16;
942 new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
943 if (new_hbq_entry == NULL)
944 return &hbq_entry->dbuf;
92d7f7b0 945 new_hbq_entry->tag = -1;
946 phys = new_hbq_entry->dbuf.phys;
947 virt = new_hbq_entry->dbuf.virt;
948 new_hbq_entry->dbuf.phys = hbq_entry->dbuf.phys;
949 new_hbq_entry->dbuf.virt = hbq_entry->dbuf.virt;
950 hbq_entry->dbuf.phys = phys;
951 hbq_entry->dbuf.virt = virt;
952 lpfc_sli_free_hbq(phba, hbq_entry);
953 return &new_hbq_entry->dbuf;
dea3101e 954}
92d7f7b0 955
dea3101e 956static int
957lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
958 struct lpfc_iocbq *saveq)
959{
960 IOCB_t * irsp;
961 WORD5 * w5p;
962 uint32_t Rctl, Type;
963 uint32_t match, i;
964
965 match = 0;
966 irsp = &(saveq->iocb);
967 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX)
968 || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)
969 || (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)
970 || (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX)) {
dea3101e 971 Rctl = FC_ELS_REQ;
972 Type = FC_ELS_DATA;
973 } else {
974 w5p =
975 (WORD5 *) & (saveq->iocb.un.
976 ulpWord[5]);
977 Rctl = w5p->hcsw.Rctl;
978 Type = w5p->hcsw.Type;
979
980 /* Firmware Workaround */
981 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
982 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
983 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
dea3101e 984 Rctl = FC_ELS_REQ;
985 Type = FC_ELS_DATA;
986 w5p->hcsw.Rctl = Rctl;
987 w5p->hcsw.Type = Type;
988 }
989 }
990
991 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
992 if (irsp->ulpBdeCount != 0)
993 saveq->context2 = lpfc_sli_replace_hbqbuff(phba,
994 irsp->un.ulpWord[3]);
995 if (irsp->ulpBdeCount == 2)
996 saveq->context3 = lpfc_sli_replace_hbqbuff(phba,
51ef4c26 997 irsp->unsli3.sli3Words[7]);
998 }
999
dea3101e 1000 /* unSolicited Responses */
1001 if (pring->prt[0].profile) {
1002 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
1003 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
1004 saveq);
dea3101e 1005 match = 1;
1006 } else {
1007 /* We must search, based on rctl / type
1008 for the right routine */
1009 for (i = 0; i < pring->num_mask;
1010 i++) {
1011 if ((pring->prt[i].rctl ==
1012 Rctl)
1013 && (pring->prt[i].
1014 type == Type)) {
1015 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
1016 (pring->prt[i].lpfc_sli_rcv_unsol_event)
1017 (phba, pring, saveq);
dea3101e 1018 match = 1;
1019 break;
1020 }
1021 }
1022 }
1023 if (match == 0) {
1024 /* Unexpected Rctl / Type received */
1025 /* Ring <ringno> handler: unexpected
1026 Rctl <Rctl> Type <Type> received */
92d7f7b0 1027 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011 1028 "0313 Ring %d handler: unexpected Rctl x%x "
92d7f7b0 1029 "Type x%x received\n",
e8b62011 1030 pring->ringno, Rctl, Type);
dea3101e 1031 }
92d7f7b0 1032 return 1;
dea3101e 1033}
1034
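/* Map the iotag in a response iocb back to the originating command iocbq,
 * removing it from the txcmplq; returns NULL (and logs message 0317) if the
 * iotag is out of range.
 */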
1035static struct lpfc_iocbq *
1036lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
1037 struct lpfc_sli_ring *pring,
1038 struct lpfc_iocbq *prspiocb)
dea3101e 1039{
dea3101e 1040 struct lpfc_iocbq *cmd_iocb = NULL;
1041 uint16_t iotag;
1042
1043 iotag = prspiocb->iocb.ulpIoTag;
1044
1045 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
1046 cmd_iocb = phba->sli.iocbq_lookup[iotag];
92d7f7b0 1047 list_del_init(&cmd_iocb->list);
1048 pring->txcmplq_cnt--;
1049 return cmd_iocb;
dea3101e 1050 }
1051
dea3101e 1052 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 1053 "0317 iotag x%x is out of "
604a3e30 1054 "range: max iotag x%x wd0 x%x\n",
e8b62011 1055 iotag, phba->sli.last_iotag,
604a3e30 1056 *(((uint32_t *) &prspiocb->iocb) + 7));
dea3101e 1057 return NULL;
1058}
1059
1060static int
2e0fef85 1061lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
dea3101e 1062 struct lpfc_iocbq *saveq)
1063{
2e0fef85 1064 struct lpfc_iocbq *cmdiocbp;
dea3101e 1065 int rc = 1;
1066 unsigned long iflag;
1067
1068 /* Based on the iotag field, get the cmd IOCB from the txcmplq */
2e0fef85 1069 spin_lock_irqsave(&phba->hbalock, iflag);
604a3e30 1070 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
1071 spin_unlock_irqrestore(&phba->hbalock, iflag);
1072
dea3101e 1073 if (cmdiocbp) {
1074 if (cmdiocbp->iocb_cmpl) {
1075 /*
1076 * Post all ELS completions to the worker thread.
1077 * All other are passed to the completion callback.
1078 */
1079 if (pring->ringno == LPFC_ELS_RING) {
1080 if (cmdiocbp->iocb_flag & LPFC_DRIVER_ABORTED) {
1081 cmdiocbp->iocb_flag &=
1082 ~LPFC_DRIVER_ABORTED;
1083 saveq->iocb.ulpStatus =
1084 IOSTAT_LOCAL_REJECT;
1085 saveq->iocb.un.ulpWord[4] =
1086 IOERR_SLI_ABORTED;
1087 }
dea3101e 1088 }
2e0fef85 1089 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
1090 } else
1091 lpfc_sli_release_iocbq(phba, cmdiocbp);
dea3101e 1092 } else {
1093 /*
1094 * Unknown initiating command based on the response iotag.
1095 * This could be the case on the ELS ring because of
1096 * lpfc_els_abort().
1097 */
1098 if (pring->ringno != LPFC_ELS_RING) {
1099 /*
1100 * Ring <ringno> handler: unexpected completion IoTag
1101 * <IoTag>
1102 */
1103 lpfc_printf_vlog(cmdiocbp->vport, KERN_WARNING, LOG_SLI,
1104 "0322 Ring %d handler: "
1105 "unexpected completion IoTag x%x "
1106 "Data: x%x x%x x%x x%x\n",
1107 pring->ringno,
1108 saveq->iocb.ulpIoTag,
1109 saveq->iocb.ulpStatus,
1110 saveq->iocb.un.ulpWord[4],
1111 saveq->iocb.ulpCommand,
1112 saveq->iocb.ulpContext);
dea3101e 1113 }
1114 }
68876920 1115
dea3101e 1116 return rc;
1117}
1118
1119static void
1120lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
875fbdfe 1121{
1122 struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
1123 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1124 &phba->slim2p->mbx.us.s2.port[pring->ringno];
1125 /*
1126 * Ring <ringno> handler: portRspPut <portRspPut> is bigger then
1127 * rsp ring <portRspMax>
1128 */
1129 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 1130 "0312 Ring %d handler: portRspPut %d "
875fbdfe 1131 "is bigger than rsp ring %d\n",
e8b62011 1132 pring->ringno, le32_to_cpu(pgp->rspPutInx),
1133 pring->numRiocb);
1134
2e0fef85 1135 phba->link_state = LPFC_HBA_ERROR;
1136
1137 /*
1138 * All error attention handlers are posted to
1139 * worker thread
1140 */
1141 phba->work_ha |= HA_ERATT;
1142 phba->work_hs = HS_FFER3;
1143
1144 /* hbalock should already be held */
875fbdfe 1145 if (phba->work_wait)
92d7f7b0 1146 lpfc_worker_wake_up(phba);
1147
1148 return;
1149}
1150
2e0fef85 1151void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
875fbdfe 1152{
1153 struct lpfc_sli *psli = &phba->sli;
1154 struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
1155 IOCB_t *irsp = NULL;
1156 IOCB_t *entry = NULL;
1157 struct lpfc_iocbq *cmdiocbq = NULL;
1158 struct lpfc_iocbq rspiocbq;
1159 struct lpfc_pgp *pgp;
1160 uint32_t status;
1161 uint32_t portRspPut, portRspMax;
1162 int type;
1163 uint32_t rsp_cmpl = 0;
875fbdfe 1164 uint32_t ha_copy;
2e0fef85 1165 unsigned long iflags;
1166
1167 pring->stats.iocb_event++;
1168
1169 pgp = (phba->sli_rev == 3) ?
1170 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1171 &phba->slim2p->mbx.us.s2.port[pring->ringno];
1172
1173
1174 /*
1175 * The next available response entry should never exceed the maximum
1176 * entries. If it does, treat it as an adapter hardware error.
1177 */
1178 portRspMax = pring->numRiocb;
1179 portRspPut = le32_to_cpu(pgp->rspPutInx);
1180 if (unlikely(portRspPut >= portRspMax)) {
1181 lpfc_sli_rsp_pointers_error(phba, pring);
1182 return;
1183 }
1184
1185 rmb();
1186 while (pring->rspidx != portRspPut) {
ed957684 1187 entry = lpfc_resp_iocb(phba, pring);
1188 if (++pring->rspidx >= portRspMax)
1189 pring->rspidx = 0;
1190
1191 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
1192 (uint32_t *) &rspiocbq.iocb,
92d7f7b0 1193 phba->iocb_rsp_size);
1194 irsp = &rspiocbq.iocb;
1195 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
1196 pring->stats.iocb_rsp++;
1197 rsp_cmpl++;
1198
1199 if (unlikely(irsp->ulpStatus)) {
1200 /* Rsp ring <ringno> error: IOCB */
1201 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011 1202 "0326 Rsp Ring %d error: IOCB Data: "
875fbdfe 1203 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
e8b62011 1204 pring->ringno,
1205 irsp->un.ulpWord[0],
1206 irsp->un.ulpWord[1],
1207 irsp->un.ulpWord[2],
1208 irsp->un.ulpWord[3],
1209 irsp->un.ulpWord[4],
1210 irsp->un.ulpWord[5],
1211 *(((uint32_t *) irsp) + 6),
1212 *(((uint32_t *) irsp) + 7));
1213 }
1214
1215 switch (type) {
1216 case LPFC_ABORT_IOCB:
1217 case LPFC_SOL_IOCB:
1218 /*
1219 * Idle exchange closed via ABTS from port. No iocb
1220 * resources need to be recovered.
1221 */
1222 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
dca9479b 1223 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1224 "0314 IOCB cmd 0x%x "
1225 "processed. Skipping "
1226 "completion",
dca9479b 1227 irsp->ulpCommand);
1228 break;
1229 }
1230
2e0fef85 1231 spin_lock_irqsave(&phba->hbalock, iflags);
1232 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
1233 &rspiocbq);
2e0fef85 1234 spin_unlock_irqrestore(&phba->hbalock, iflags);
1235 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
1236 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
1237 &rspiocbq);
1238 }
1239 break;
1240 default:
1241 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
1242 char adaptermsg[LPFC_MAX_ADPTMSG];
1243 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
1244 memcpy(&adaptermsg[0], (uint8_t *) irsp,
1245 MAX_MSG_DATA);
1246 dev_warn(&((phba->pcidev)->dev),
1247 "lpfc%d: %s\n",
1248 phba->brd_no, adaptermsg);
1249 } else {
1250 /* Unknown IOCB command */
1251 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 1252 "0321 Unknown IOCB command "
875fbdfe 1253 "Data: x%x, x%x x%x x%x x%x\n",
e8b62011 1254 type, irsp->ulpCommand,
1255 irsp->ulpStatus,
1256 irsp->ulpIoTag,
1257 irsp->ulpContext);
1258 }
1259 break;
1260 }
1261
1262 /*
1263 * The response IOCB has been processed. Update the ring
1264 * pointer in SLIM. If the port response put pointer has not
1265 * been updated, sync the pgp->rspPutInx and fetch the new port
1266 * response put pointer.
1267 */
ed957684 1268 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
1269
1270 if (pring->rspidx == portRspPut)
1271 portRspPut = le32_to_cpu(pgp->rspPutInx);
1272 }
1273
1274 ha_copy = readl(phba->HAregaddr);
1275 ha_copy >>= (LPFC_FCP_RING * 4);
1276
1277 if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) {
2e0fef85 1278 spin_lock_irqsave(&phba->hbalock, iflags);
1279 pring->stats.iocb_rsp_full++;
1280 status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4));
1281 writel(status, phba->CAregaddr);
1282 readl(phba->CAregaddr);
2e0fef85 1283 spin_unlock_irqrestore(&phba->hbalock, iflags);
1284 }
1285 if ((ha_copy & HA_R0CE_RSP) &&
1286 (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
2e0fef85 1287 spin_lock_irqsave(&phba->hbalock, iflags);
1288 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
1289 pring->stats.iocb_cmd_empty++;
1290
1291 /* Force update of the local copy of cmdGetInx */
1292 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
1293 lpfc_sli_resume_iocb(phba, pring);
1294
1295 if ((pring->lpfc_sli_cmd_available))
1296 (pring->lpfc_sli_cmd_available) (phba, pring);
1297
2e0fef85 1298 spin_unlock_irqrestore(&phba->hbalock, iflags);
1299 }
1300
1301 return;
1302}
1303
dea3101e 1304/*
1305 * This routine presumes LPFC_FCP_RING handling and doesn't bother
1306 * to check it explicitly.
1307 */
1308static int
1309lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
1310 struct lpfc_sli_ring *pring, uint32_t mask)
dea3101e 1311{
1312 struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
1313 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1314 &phba->slim2p->mbx.us.s2.port[pring->ringno];
dea3101e 1315 IOCB_t *irsp = NULL;
87f6eaff 1316 IOCB_t *entry = NULL;
dea3101e 1317 struct lpfc_iocbq *cmdiocbq = NULL;
1318 struct lpfc_iocbq rspiocbq;
dea3101e 1319 uint32_t status;
1320 uint32_t portRspPut, portRspMax;
1321 int rc = 1;
1322 lpfc_iocb_type type;
1323 unsigned long iflag;
1324 uint32_t rsp_cmpl = 0;
dea3101e 1325
2e0fef85 1326 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 1327 pring->stats.iocb_event++;
1328
dea3101e 1329 /*
1330 * The next available response entry should never exceed the maximum
1331 * entries. If it does, treat it as an adapter hardware error.
1332 */
1333 portRspMax = pring->numRiocb;
1334 portRspPut = le32_to_cpu(pgp->rspPutInx);
1335 if (unlikely(portRspPut >= portRspMax)) {
875fbdfe 1336 lpfc_sli_rsp_pointers_error(phba, pring);
2e0fef85 1337 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 1338 return 1;
1339 }
1340
1341 rmb();
1342 while (pring->rspidx != portRspPut) {
1343 /*
1344 * Fetch an entry off the ring and copy it into a local data
1345 * structure. The copy involves a byte-swap since the
1346 * network byte order and pci byte orders are different.
1347 */
ed957684 1348 entry = lpfc_resp_iocb(phba, pring);
858c9f6c 1349 phba->last_completion_time = jiffies;
1350
1351 if (++pring->rspidx >= portRspMax)
1352 pring->rspidx = 0;
1353
1354 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
1355 (uint32_t *) &rspiocbq.iocb,
ed957684 1356 phba->iocb_rsp_size);
a4bc3379 1357 INIT_LIST_HEAD(&(rspiocbq.list));
1358 irsp = &rspiocbq.iocb;
1359
dea3101e 1360 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
1361 pring->stats.iocb_rsp++;
1362 rsp_cmpl++;
1363
1364 if (unlikely(irsp->ulpStatus)) {
1365 /*
1366 * If resource errors reported from HBA, reduce
1367 * queuedepths of the SCSI device.
1368 */
1369 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1370 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
1371 spin_unlock_irqrestore(&phba->hbalock, iflag);
1372 lpfc_adjust_queue_depth(phba);
1373 spin_lock_irqsave(&phba->hbalock, iflag);
1374 }
1375
dea3101e 1376 /* Rsp ring <ringno> error: IOCB */
1377 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011 1378 "0336 Rsp Ring %d error: IOCB Data: "
92d7f7b0 1379 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
e8b62011 1380 pring->ringno,
1381 irsp->un.ulpWord[0],
1382 irsp->un.ulpWord[1],
1383 irsp->un.ulpWord[2],
1384 irsp->un.ulpWord[3],
1385 irsp->un.ulpWord[4],
1386 irsp->un.ulpWord[5],
1387 *(((uint32_t *) irsp) + 6),
1388 *(((uint32_t *) irsp) + 7));
dea3101e 1389 }
1390
1391 switch (type) {
1392 case LPFC_ABORT_IOCB:
1393 case LPFC_SOL_IOCB:
1394 /*
1395 * Idle exchange closed via ABTS from port. No iocb
1396 * resources need to be recovered.
1397 */
1398 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
dca9479b 1399 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 1400 "0333 IOCB cmd 0x%x"
dca9479b 1401 " processed. Skipping"
92d7f7b0 1402 " completion\n",
dca9479b 1403 irsp->ulpCommand);
dea3101e 1404 break;
1405 }
1406
1407 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
1408 &rspiocbq);
dea3101e 1409 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
1410 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
1411 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
1412 &rspiocbq);
1413 } else {
1414 spin_unlock_irqrestore(&phba->hbalock,
1415 iflag);
1416 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
1417 &rspiocbq);
2e0fef85 1418 spin_lock_irqsave(&phba->hbalock,
b808608b
JW
1419 iflag);
1420 }
dea3101e 1421 }
1422 break;
a4bc3379 1423 case LPFC_UNSOL_IOCB:
2e0fef85 1424 spin_unlock_irqrestore(&phba->hbalock, iflag);
a4bc3379 1425 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
2e0fef85 1426 spin_lock_irqsave(&phba->hbalock, iflag);
a4bc3379 1427 break;
dea3101e 1428 default:
1429 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
1430 char adaptermsg[LPFC_MAX_ADPTMSG];
1431 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
1432 memcpy(&adaptermsg[0], (uint8_t *) irsp,
1433 MAX_MSG_DATA);
1434 dev_warn(&((phba->pcidev)->dev),
1435 "lpfc%d: %s\n",
dea3101e 1436 phba->brd_no, adaptermsg);
1437 } else {
1438 /* Unknown IOCB command */
1439 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 1440 "0334 Unknown IOCB command "
92d7f7b0 1441 "Data: x%x, x%x x%x x%x x%x\n",
e8b62011 1442 type, irsp->ulpCommand,
1443 irsp->ulpStatus,
1444 irsp->ulpIoTag,
1445 irsp->ulpContext);
dea3101e 1446 }
1447 break;
1448 }
1449
1450 /*
1451 * The response IOCB has been processed. Update the ring
1452 * pointer in SLIM. If the port response put pointer has not
1453 * been updated, sync the pgp->rspPutInx and fetch the new port
1454 * response put pointer.
1455 */
ed957684 1456 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
dea3101e 1457
1458 if (pring->rspidx == portRspPut)
1459 portRspPut = le32_to_cpu(pgp->rspPutInx);
1460 }
1461
1462 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
1463 pring->stats.iocb_rsp_full++;
1464 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
1465 writel(status, phba->CAregaddr);
1466 readl(phba->CAregaddr);
1467 }
1468 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
1469 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
1470 pring->stats.iocb_cmd_empty++;
1471
1472 /* Force update of the local copy of cmdGetInx */
1473 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
1474 lpfc_sli_resume_iocb(phba, pring);
1475
1476 if ((pring->lpfc_sli_cmd_available))
1477 (pring->lpfc_sli_cmd_available) (phba, pring);
1478
1479 }
1480
2e0fef85 1481 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 1482 return rc;
1483}
1484
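/* Service the slow (ELS) ring: copy response entries into driver iocbqs,
 * chain continuation entries until ulpLe is set, then dispatch to the
 * solicited, unsolicited, or abort completion handlers.
 */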
dea3101e 1485int
1486lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
1487 struct lpfc_sli_ring *pring, uint32_t mask)
dea3101e 1488{
1489 struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
1490 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1491 &phba->slim2p->mbx.us.s2.port[pring->ringno];
dea3101e 1492 IOCB_t *entry;
1493 IOCB_t *irsp = NULL;
1494 struct lpfc_iocbq *rspiocbp = NULL;
1495 struct lpfc_iocbq *next_iocb;
1496 struct lpfc_iocbq *cmdiocbp;
1497 struct lpfc_iocbq *saveq;
dea3101e 1498 uint8_t iocb_cmd_type;
1499 lpfc_iocb_type type;
1500 uint32_t status, free_saveq;
1501 uint32_t portRspPut, portRspMax;
1502 int rc = 1;
1503 unsigned long iflag;
dea3101e 1504
2e0fef85 1505 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 1506 pring->stats.iocb_event++;
1507
dea3101e 1508 /*
1509 * The next available response entry should never exceed the maximum
1510 * entries. If it does, treat it as an adapter hardware error.
1511 */
1512 portRspMax = pring->numRiocb;
1513 portRspPut = le32_to_cpu(pgp->rspPutInx);
1514 if (portRspPut >= portRspMax) {
1515 /*
1516 * Ring <ringno> handler: portRspPut <portRspPut> is bigger then
1517 * rsp ring <portRspMax>
1518 */
ed957684 1519 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 1520 "0303 Ring %d handler: portRspPut %d "
dea3101e 1521 "is bigger than rsp ring %d\n",
e8b62011 1522 pring->ringno, portRspPut, portRspMax);
dea3101e 1523
1524 phba->link_state = LPFC_HBA_ERROR;
1525 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 1526
1527 phba->work_hs = HS_FFER3;
1528 lpfc_handle_eratt(phba);
1529
1530 return 1;
1531 }
1532
1533 rmb();
dea3101e 1534 while (pring->rspidx != portRspPut) {
1535 /*
1536 * Build a completion list and call the appropriate handler.
1537 * The process is to get the next available response iocb, get
1538 * a free iocb from the list, copy the response data into the
1539 * free iocb, insert to the continuation list, and update the
1540 * next response index to slim. This process makes response
1541 * iocb's in the ring available to DMA as fast as possible but
1542 * pays a penalty for a copy operation. Since the iocb is
1543 * only 32 bytes, this penalty is considered small relative to
1544 * the PCI reads for register values and a slim write. When
1545 * the ulpLe field is set, the entire Command has been
1546 * received.
1547 */
1548 entry = lpfc_resp_iocb(phba, pring);
1549
858c9f6c 1550 phba->last_completion_time = jiffies;
2e0fef85 1551 rspiocbp = __lpfc_sli_get_iocbq(phba);
dea3101e 1552 if (rspiocbp == NULL) {
1553 printk(KERN_ERR "%s: out of buffers! Failing "
1554 "completion.\n", __FUNCTION__);
1555 break;
1556 }
1557
ed957684
JS
1558 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
1559 phba->iocb_rsp_size);
dea3101e 1560 irsp = &rspiocbp->iocb;
1561
1562 if (++pring->rspidx >= portRspMax)
1563 pring->rspidx = 0;
1564
1565 if (pring->ringno == LPFC_ELS_RING) {
1566 lpfc_debugfs_slow_ring_trc(phba,
1567 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
1568 *(((uint32_t *) irsp) + 4),
1569 *(((uint32_t *) irsp) + 6),
1570 *(((uint32_t *) irsp) + 7));
1571 }
1572
ed957684 1573 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
dea3101e 1574
1575 if (list_empty(&(pring->iocb_continueq))) {
1576 list_add(&rspiocbp->list, &(pring->iocb_continueq));
1577 } else {
1578 list_add_tail(&rspiocbp->list,
1579 &(pring->iocb_continueq));
1580 }
1581
1582 pring->iocb_continueq_cnt++;
1583 if (irsp->ulpLe) {
1584 /*
1585 * By default, the driver expects to free all resources
1586 * associated with this iocb completion.
1587 */
1588 free_saveq = 1;
1589 saveq = list_get_first(&pring->iocb_continueq,
1590 struct lpfc_iocbq, list);
1591 irsp = &(saveq->iocb);
1592 list_del_init(&pring->iocb_continueq);
1593 pring->iocb_continueq_cnt = 0;
1594
1595 pring->stats.iocb_rsp++;
1596
1597 /*
1598 * If resource errors reported from HBA, reduce
1599 * queuedepths of the SCSI device.
1600 */
1601 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1602 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
1603 spin_unlock_irqrestore(&phba->hbalock, iflag);
1604 lpfc_adjust_queue_depth(phba);
1605 spin_lock_irqsave(&phba->hbalock, iflag);
1606 }
1607
dea3101e 1608 if (irsp->ulpStatus) {
1609 /* Rsp ring <ringno> error: IOCB */
ed957684 1610 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011 1611 "0328 Rsp Ring %d error: "
1612 "IOCB Data: "
1613 "x%x x%x x%x x%x "
1614 "x%x x%x x%x x%x "
1615 "x%x x%x x%x x%x "
1616 "x%x x%x x%x x%x\n",
1617 pring->ringno,
1618 irsp->un.ulpWord[0],
1619 irsp->un.ulpWord[1],
1620 irsp->un.ulpWord[2],
1621 irsp->un.ulpWord[3],
1622 irsp->un.ulpWord[4],
1623 irsp->un.ulpWord[5],
1624 *(((uint32_t *) irsp) + 6),
1625 *(((uint32_t *) irsp) + 7),
1626 *(((uint32_t *) irsp) + 8),
1627 *(((uint32_t *) irsp) + 9),
1628 *(((uint32_t *) irsp) + 10),
1629 *(((uint32_t *) irsp) + 11),
1630 *(((uint32_t *) irsp) + 12),
1631 *(((uint32_t *) irsp) + 13),
1632 *(((uint32_t *) irsp) + 14),
1633 *(((uint32_t *) irsp) + 15));
dea3101e 1634 }
1635
1636 /*
1637 * Fetch the IOCB command type and call the correct
1638 * completion routine. Solicited and Unsolicited
1639 * IOCBs on the ELS ring get freed back to the
1640 * lpfc_iocb_list by the discovery kernel thread.
1641 */
1642 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
1643 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
1644 if (type == LPFC_SOL_IOCB) {
2e0fef85 1645 spin_unlock_irqrestore(&phba->hbalock,
dea3101e 1646 iflag);
1647 rc = lpfc_sli_process_sol_iocb(phba, pring,
1648 saveq);
1649 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 1650 } else if (type == LPFC_UNSOL_IOCB) {
2e0fef85 1651 spin_unlock_irqrestore(&phba->hbalock,
dea3101e 1652 iflag);
1653 rc = lpfc_sli_process_unsol_iocb(phba, pring,
1654 saveq);
1655 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 1656 } else if (type == LPFC_ABORT_IOCB) {
1657 if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
1658 ((cmdiocbp =
1659 lpfc_sli_iocbq_lookup(phba, pring,
1660 saveq)))) {
dea3101e 1661 /* Call the specified completion
1662 routine */
1663 if (cmdiocbp->iocb_cmpl) {
1664 spin_unlock_irqrestore(
2e0fef85 1665 &phba->hbalock,
dea3101e 1666 iflag);
1667 (cmdiocbp->iocb_cmpl) (phba,
1668 cmdiocbp, saveq);
1669 spin_lock_irqsave(
2e0fef85 1670 &phba->hbalock,
dea3101e 1671 iflag);
604a3e30 1672 } else
2e0fef85 1673 __lpfc_sli_release_iocbq(phba,
604a3e30 1674 cmdiocbp);
dea3101e 1675 }
1676 } else if (type == LPFC_UNKNOWN_IOCB) {
1677 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
1678
1679 char adaptermsg[LPFC_MAX_ADPTMSG];
1680
1681 memset(adaptermsg, 0,
1682 LPFC_MAX_ADPTMSG);
1683 memcpy(&adaptermsg[0], (uint8_t *) irsp,
1684 MAX_MSG_DATA);
1685 dev_warn(&((phba->pcidev)->dev),
898eb71c 1686 "lpfc%d: %s\n",
dea3101e 1687 phba->brd_no, adaptermsg);
1688 } else {
1689 /* Unknown IOCB command */
92d7f7b0 1690 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 1691 "0335 Unknown IOCB "
1692 "command Data: x%x "
1693 "x%x x%x x%x\n",
1694 irsp->ulpCommand,
1695 irsp->ulpStatus,
1696 irsp->ulpIoTag,
1697 irsp->ulpContext);
dea3101e 1698 }
1699 }
1700
1701 if (free_saveq) {
1702 list_for_each_entry_safe(rspiocbp, next_iocb,
1703 &saveq->list, list) {
1704 list_del(&rspiocbp->list);
1705 __lpfc_sli_release_iocbq(phba,
1706 rspiocbp);
dea3101e 1707 }
2e0fef85 1708 __lpfc_sli_release_iocbq(phba, saveq);
dea3101e 1709 }
92d7f7b0 1710 rspiocbp = NULL;
dea3101e 1711 }
1712
1713 /*
1714 * If the port response put pointer has not been updated, sync
1715 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port
1716 * response put pointer.
1717 */
1718 if (pring->rspidx == portRspPut) {
1719 portRspPut = le32_to_cpu(pgp->rspPutInx);
1720 }
1721 } /* while (pring->rspidx != portRspPut) */
1722
92d7f7b0 1723 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
dea3101e 1724 /* At least one response entry has been freed */
1725 pring->stats.iocb_rsp_full++;
1726 /* SET RxRE_RSP in Chip Att register */
1727 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
1728 writel(status, phba->CAregaddr);
1729 readl(phba->CAregaddr); /* flush */
1730 }
1731 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
1732 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
1733 pring->stats.iocb_cmd_empty++;
1734
1735 /* Force update of the local copy of cmdGetInx */
1736 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
1737 lpfc_sli_resume_iocb(phba, pring);
1738
1739 if ((pring->lpfc_sli_cmd_available))
1740 (pring->lpfc_sli_cmd_available) (phba, pring);
1741
1742 }
1743
2e0fef85 1744 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 1745 return rc;
1746}
1747
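/* Flush a ring: fail everything still waiting on the txq with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED and issue an abort for every command
 * still outstanding on the txcmplq.
 */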
2e0fef85 1748void
dea3101e 1749lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1750{
2534ba75 1751 LIST_HEAD(completions);
dea3101e 1752 struct lpfc_iocbq *iocb, *next_iocb;
2534ba75 1753 IOCB_t *cmd = NULL;
dea3101e 1754
1755 if (pring->ringno == LPFC_ELS_RING) {
1756 lpfc_fabric_abort_hba(phba);
1757 }
1758
dea3101e 1759 /* Error everything on txq and txcmplq
1760 * First do the txq.
1761 */
2e0fef85 1762 spin_lock_irq(&phba->hbalock);
2534ba75 1763 list_splice_init(&pring->txq, &completions);
dea3101e 1764 pring->txq_cnt = 0;
dea3101e 1765
1766 /* Next issue ABTS for everything on the txcmplq */
1767 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
1768 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
dea3101e 1769
2e0fef85 1770 spin_unlock_irq(&phba->hbalock);
dea3101e 1771
2534ba75
JS
1772 while (!list_empty(&completions)) {
1773 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
1774 cmd = &iocb->iocb;
92d7f7b0 1775 list_del_init(&iocb->list);
dea3101e 1776
2e0fef85
JS
1777 if (!iocb->iocb_cmpl)
1778 lpfc_sli_release_iocbq(phba, iocb);
1779 else {
dea3101e 1780 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
1781 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
dea3101e 1782 (iocb->iocb_cmpl) (phba, iocb, iocb);
2e0fef85 1783 }
dea3101e 1784 }
dea3101e 1785}
1786
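/*
 * lpfc_sli_brdready - Wait for the HBA to report ready.
 * Polls the Host Status register until all bits in @mask are set,
 * sleeping longer between attempts and restarting the board on the
 * 15th try.  Returns 0 when the mask is satisfied, or 1 if an error
 * attention (HS_FFERM) is seen or the 20 attempts are exhausted.
 */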
41415862 1787int
2e0fef85 1788lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
dea3101e 1789{
41415862
JW
1790 uint32_t status;
1791 int i = 0;
1792 int retval = 0;
dea3101e 1793
41415862
JW
1794 /* Read the HBA Host Status Register */
1795 status = readl(phba->HSregaddr);
dea3101e 1796
41415862
JW
1797 /*
1798 * Check status register every 100ms for 5 retries, then every
1799 * 500ms for 5, then every 2.5 sec for 5, then reset board and
1800 * every 2.5 sec for 4.
1801 * Break out of the loop if errors occurred during init.
1802 */
1803 while (((status & mask) != mask) &&
1804 !(status & HS_FFERM) &&
1805 i++ < 20) {
dea3101e 1806
41415862
JW
1807 if (i <= 5)
1808 msleep(10);
1809 else if (i <= 10)
1810 msleep(500);
1811 else
1812 msleep(2500);
dea3101e 1813
41415862 1814 if (i == 15) {
2e0fef85 1815 /* Do post */
92d7f7b0 1816 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
41415862
JW
1817 lpfc_sli_brdrestart(phba);
1818 }
1819 /* Read the HBA Host Status Register */
1820 status = readl(phba->HSregaddr);
1821 }
dea3101e 1822
41415862
JW
1823 /* Check to see if any errors occurred during init */
1824 if ((status & HS_FFERM) || (i >= 20)) {
2e0fef85 1825 phba->link_state = LPFC_HBA_ERROR;
41415862 1826 retval = 1;
dea3101e 1827 }
dea3101e 1828
41415862
JW
1829 return retval;
1830}
dea3101e 1831
9290831f
JS
1832#define BARRIER_TEST_PATTERN (0xdeadbeef)
1833
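/*
 * lpfc_reset_barrier - Quiesce HBA DMA before a board reset.
 * Applies only to Helios/Thor adapters with PCI header type 0x80.
 * Error attentions are temporarily masked, an MBX_KILL_BOARD command is
 * written directly into SLIM, and the routine waits for the chip to
 * respond (or raise an error attention) before restoring the Host
 * Control register.
 */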
2e0fef85 1834void lpfc_reset_barrier(struct lpfc_hba *phba)
9290831f 1835{
65a29c16
JS
1836 uint32_t __iomem *resp_buf;
1837 uint32_t __iomem *mbox_buf;
9290831f
JS
1838 volatile uint32_t mbox;
1839 uint32_t hc_copy;
1840 int i;
1841 uint8_t hdrtype;
1842
1843 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
1844 if (hdrtype != 0x80 ||
1845 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
1846 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
1847 return;
1848
1849 /*
1850 * Tell the other part of the chip to suspend temporarily all
1851 * its DMA activity.
1852 */
65a29c16 1853 resp_buf = phba->MBslimaddr;
9290831f
JS
1854
1855 /* Disable the error attention */
1856 hc_copy = readl(phba->HCregaddr);
1857 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
1858 readl(phba->HCregaddr); /* flush */
2e0fef85 1859 phba->link_flag |= LS_IGNORE_ERATT;
9290831f
JS
1860
1861 if (readl(phba->HAregaddr) & HA_ERATT) {
1862 /* Clear Chip error bit */
1863 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 1864 phba->pport->stopped = 1;
9290831f
JS
1865 }
1866
1867 mbox = 0;
1868 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
1869 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
1870
1871 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
65a29c16 1872 mbox_buf = phba->MBslimaddr;
9290831f
JS
1873 writel(mbox, mbox_buf);
1874
1875 for (i = 0;
1876 readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++)
1877 mdelay(1);
1878
1879 if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
1880 if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE ||
2e0fef85 1881 phba->pport->stopped)
9290831f
JS
1882 goto restore_hc;
1883 else
1884 goto clear_errat;
1885 }
1886
1887 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
1888 for (i = 0; readl(resp_buf) != mbox && i < 500; i++)
1889 mdelay(1);
1890
1891clear_errat:
1892
1893 while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500)
1894 mdelay(1);
1895
1896 if (readl(phba->HAregaddr) & HA_ERATT) {
1897 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 1898 phba->pport->stopped = 1;
9290831f
JS
1899 }
1900
1901restore_hc:
2e0fef85 1902 phba->link_flag &= ~LS_IGNORE_ERATT;
9290831f
JS
1903 writel(hc_copy, phba->HCregaddr);
1904 readl(phba->HCregaddr); /* flush */
1905}
1906
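/*
 * lpfc_sli_brdkill - Stop HBA processing with a KILL_BOARD mailbox command.
 * Error attentions are masked while the command is issued, then the
 * Host Attention register is polled for ERATT for up to three seconds.
 * The HBA is left in LPFC_HBA_ERROR state.  Returns 0 if the kill
 * completed (ERATT seen), 1 otherwise.
 */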
41415862 1907int
2e0fef85 1908lpfc_sli_brdkill(struct lpfc_hba *phba)
41415862
JW
1909{
1910 struct lpfc_sli *psli;
1911 LPFC_MBOXQ_t *pmb;
1912 uint32_t status;
1913 uint32_t ha_copy;
1914 int retval;
1915 int i = 0;
dea3101e 1916
41415862 1917 psli = &phba->sli;
dea3101e 1918
41415862 1919 /* Kill HBA */
ed957684 1920 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011
JS
1921 "0329 Kill HBA Data: x%x x%x\n",
1922 phba->pport->port_state, psli->sli_flag);
41415862
JW
1923
1924 if ((pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
9290831f 1925 GFP_KERNEL)) == 0)
41415862 1926 return 1;
41415862
JW
1927
1928 /* Disable the error attention */
2e0fef85 1929 spin_lock_irq(&phba->hbalock);
41415862
JW
1930 status = readl(phba->HCregaddr);
1931 status &= ~HC_ERINT_ENA;
1932 writel(status, phba->HCregaddr);
1933 readl(phba->HCregaddr); /* flush */
2e0fef85
JS
1934 phba->link_flag |= LS_IGNORE_ERATT;
1935 spin_unlock_irq(&phba->hbalock);
41415862
JW
1936
1937 lpfc_kill_board(phba, pmb);
1938 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1939 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1940
1941 if (retval != MBX_SUCCESS) {
1942 if (retval != MBX_BUSY)
1943 mempool_free(pmb, phba->mbox_mem_pool);
2e0fef85
JS
1944 spin_lock_irq(&phba->hbalock);
1945 phba->link_flag &= ~LS_IGNORE_ERATT;
1946 spin_unlock_irq(&phba->hbalock);
41415862
JW
1947 return 1;
1948 }
1949
9290831f
JS
1950 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
1951
41415862
JW
1952 mempool_free(pmb, phba->mbox_mem_pool);
1953
1954 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
1955 * attention every 100ms for 3 seconds. If we don't get ERATT after
1956 * 3 seconds we still set HBA_ERROR state because the status of the
1957 * board is now undefined.
1958 */
1959 ha_copy = readl(phba->HAregaddr);
1960
1961 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
1962 mdelay(100);
1963 ha_copy = readl(phba->HAregaddr);
1964 }
1965
1966 del_timer_sync(&psli->mbox_tmo);
9290831f
JS
1967 if (ha_copy & HA_ERATT) {
1968 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 1969 phba->pport->stopped = 1;
9290831f 1970 }
2e0fef85 1971 spin_lock_irq(&phba->hbalock);
41415862 1972 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85
JS
1973 phba->link_flag &= ~LS_IGNORE_ERATT;
1974 spin_unlock_irq(&phba->hbalock);
41415862
JW
1975
1976 psli->mbox_active = NULL;
1977 lpfc_hba_down_post(phba);
2e0fef85 1978 phba->link_state = LPFC_HBA_ERROR;
41415862 1979
2e0fef85 1980 return ha_copy & HA_ERATT ? 0 : 1;
dea3101e 1981}
1982
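/*
 * lpfc_sli_brdreset - Reset the HBA.
 * Parity and SERR reporting are disabled while the INITFF bit in the
 * Host Control register is toggled, the PCI command register is then
 * restored, and the driver's per-ring indices are cleared.  Leaves the
 * link state at LPFC_WARM_START.
 */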
41415862 1983int
2e0fef85 1984lpfc_sli_brdreset(struct lpfc_hba *phba)
dea3101e 1985{
41415862 1986 struct lpfc_sli *psli;
dea3101e 1987 struct lpfc_sli_ring *pring;
41415862 1988 uint16_t cfg_value;
dea3101e 1989 int i;
dea3101e 1990
41415862 1991 psli = &phba->sli;
dea3101e 1992
41415862
JW
1993 /* Reset HBA */
1994 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 1995 "0325 Reset HBA Data: x%x x%x\n",
2e0fef85 1996 phba->pport->port_state, psli->sli_flag);
dea3101e 1997
1998 /* perform board reset */
1999 phba->fc_eventTag = 0;
2e0fef85
JS
2000 phba->pport->fc_myDID = 0;
2001 phba->pport->fc_prevDID = 0;
dea3101e 2002
41415862
JW
2003 /* Turn off parity checking and serr during the physical reset */
2004 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
2005 pci_write_config_word(phba->pcidev, PCI_COMMAND,
2006 (cfg_value &
2007 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
2008
1c067a42 2009 psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA);
41415862
JW
2010 /* Now toggle INITFF bit in the Host Control Register */
2011 writel(HC_INITFF, phba->HCregaddr);
2012 mdelay(1);
2013 readl(phba->HCregaddr); /* flush */
2014 writel(0, phba->HCregaddr);
2015 readl(phba->HCregaddr); /* flush */
2016
2017 /* Restore PCI cmd register */
2018 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
dea3101e 2019
2020 /* Initialize relevant SLI info */
41415862
JW
2021 for (i = 0; i < psli->num_rings; i++) {
2022 pring = &psli->ring[i];
dea3101e 2023 pring->flag = 0;
2024 pring->rspidx = 0;
2025 pring->next_cmdidx = 0;
2026 pring->local_getidx = 0;
2027 pring->cmdidx = 0;
2028 pring->missbufcnt = 0;
2029 }
dea3101e 2030
2e0fef85 2031 phba->link_state = LPFC_WARM_START;
41415862
JW
2032 return 0;
2033}
2034
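/*
 * lpfc_sli_brdrestart - Restart the HBA.
 * Issues a reset barrier, writes an MBX_RESTART command directly into
 * SLIM (skipping POST once the port has been initialized), calls
 * lpfc_sli_brdreset(), and finally runs lpfc_hba_down_post() to clean
 * up outstanding I/O.
 */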
2035int
2e0fef85 2036lpfc_sli_brdrestart(struct lpfc_hba *phba)
41415862
JW
2037{
2038 MAILBOX_t *mb;
2039 struct lpfc_sli *psli;
2040 uint16_t skip_post;
2041 volatile uint32_t word0;
2042 void __iomem *to_slim;
2043
2e0fef85 2044 spin_lock_irq(&phba->hbalock);
41415862
JW
2045
2046 psli = &phba->sli;
2047
2048 /* Restart HBA */
2049 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 2050 "0337 Restart HBA Data: x%x x%x\n",
2e0fef85 2051 phba->pport->port_state, psli->sli_flag);
41415862
JW
2052
2053 word0 = 0;
2054 mb = (MAILBOX_t *) &word0;
2055 mb->mbxCommand = MBX_RESTART;
2056 mb->mbxHc = 1;
2057
9290831f
JS
2058 lpfc_reset_barrier(phba);
2059
41415862
JW
2060 to_slim = phba->MBslimaddr;
2061 writel(*(uint32_t *) mb, to_slim);
2062 readl(to_slim); /* flush */
2063
2064 /* Only skip post after fc_ffinit is completed */
2e0fef85 2065 if (phba->pport->port_state) {
41415862
JW
2066 skip_post = 1;
2067 word0 = 1; /* This is really setting up word1 */
dea3101e 2068 } else {
41415862
JW
2069 skip_post = 0;
2070 word0 = 0; /* This is really setting up word1 */
dea3101e 2071 }
65a29c16 2072 to_slim = phba->MBslimaddr + sizeof (uint32_t);
41415862
JW
2073 writel(*(uint32_t *) mb, to_slim);
2074 readl(to_slim); /* flush */
dea3101e 2075
41415862 2076 lpfc_sli_brdreset(phba);
2e0fef85
JS
2077 phba->pport->stopped = 0;
2078 phba->link_state = LPFC_INIT_START;
41415862 2079
2e0fef85 2080 spin_unlock_irq(&phba->hbalock);
41415862 2081
64ba8818
JS
2082 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
2083 psli->stats_start = get_seconds();
2084
41415862
JW
2085 if (skip_post)
2086 mdelay(100);
2087 else
2088 mdelay(2000);
2089
2090 lpfc_hba_down_post(phba);
dea3101e 2091
2092 return 0;
2093}
2094
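/*
 * lpfc_sli_chipset_init - Wait for the chip to come ready after reset.
 * Polls the Host Status register for HS_FFRDY and HS_MBRDY, restarting
 * the board on the 15th attempt; returns -ETIMEDOUT or -EIO on failure.
 * On success all interrupt enables are cleared and the Host Attention
 * register is reset.
 */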
2095static int
2096lpfc_sli_chipset_init(struct lpfc_hba *phba)
2097{
2098 uint32_t status, i = 0;
2099
2100 /* Read the HBA Host Status Register */
2101 status = readl(phba->HSregaddr);
2102
2103 /* Check status register to see what current state is */
2104 i = 0;
2105 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
2106
2107 /* Check every 100ms for 5 retries, then every 500ms for 5, then
2108 * every 2.5 sec for 5, then reset board and every 2.5 sec for
2109 * 4.
2110 */
2111 if (i++ >= 20) {
2112 /* Adapter failed to init, timeout, status reg
2113 <status> */
ed957684 2114 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011
JS
2115 "0436 Adapter failed to init, "
2116 "timeout, status reg x%x\n", status);
2e0fef85 2117 phba->link_state = LPFC_HBA_ERROR;
dea3101e 2118 return -ETIMEDOUT;
2119 }
2120
2121 /* Check to see if any errors occurred during init */
2122 if (status & HS_FFERM) {
2123 /* ERROR: During chipset initialization */
2124 /* Adapter failed to init, chipset, status reg
2125 <status> */
ed957684 2126 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011
JS
2127 "0437 Adapter failed to init, "
2128 "chipset, status reg x%x\n", status);
2e0fef85 2129 phba->link_state = LPFC_HBA_ERROR;
dea3101e 2130 return -EIO;
2131 }
2132
2133 if (i <= 5) {
2134 msleep(10);
2135 } else if (i <= 10) {
2136 msleep(500);
2137 } else {
2138 msleep(2500);
2139 }
2140
2141 if (i == 15) {
2e0fef85 2142 /* Do post */
92d7f7b0 2143 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
41415862 2144 lpfc_sli_brdrestart(phba);
dea3101e 2145 }
2146 /* Read the HBA Host Status Register */
2147 status = readl(phba->HSregaddr);
2148 }
2149
2150 /* Check to see if any errors occurred during init */
2151 if (status & HS_FFERM) {
2152 /* ERROR: During chipset initialization */
2153 /* Adapter failed to init, chipset, status reg <status> */
ed957684 2154 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011
JS
2155 "0438 Adapter failed to init, chipset, "
2156 "status reg x%x\n", status);
2e0fef85 2157 phba->link_state = LPFC_HBA_ERROR;
dea3101e 2158 return -EIO;
2159 }
2160
2161 /* Clear all interrupt enable conditions */
2162 writel(0, phba->HCregaddr);
2163 readl(phba->HCregaddr); /* flush */
2164
2165 /* setup host attn register */
2166 writel(0xffffffff, phba->HAregaddr);
2167 readl(phba->HAregaddr); /* flush */
2168 return 0;
2169}
2170
78b2d852 2171int
ed957684
JS
2172lpfc_sli_hbq_count(void)
2173{
92d7f7b0 2174 return ARRAY_SIZE(lpfc_hbq_defs);
ed957684
JS
2175}
2176
2177static int
2178lpfc_sli_hbq_entry_count(void)
2179{
2180 int hbq_count = lpfc_sli_hbq_count();
2181 int count = 0;
2182 int i;
2183
2184 for (i = 0; i < hbq_count; ++i)
92d7f7b0 2185 count += lpfc_hbq_defs[i]->entry_count;
ed957684
JS
2186 return count;
2187}
2188
dea3101e 2189int
ed957684
JS
2190lpfc_sli_hbq_size(void)
2191{
2192 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
2193}
2194
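/*
 * lpfc_sli_hbq_setup - Configure the host buffer queues.
 * Issues a polled CONFIG_HBQ mailbox command for every queue defined in
 * lpfc_hbq_defs[], resets the per-HBQ get/put indices, and then
 * populates each queue with receive buffers.  Returns 0 on success.
 */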
2195static int
2196lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2197{
2198 int hbq_count = lpfc_sli_hbq_count();
2199 LPFC_MBOXQ_t *pmb;
2200 MAILBOX_t *pmbox;
2201 uint32_t hbqno;
2202 uint32_t hbq_entry_index;
ed957684 2203
92d7f7b0
JS
2204 /* Get a Mailbox buffer to setup mailbox
2205 * commands for HBA initialization
2206 */
ed957684
JS
2207 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2208
2209 if (!pmb)
2210 return -ENOMEM;
2211
2212 pmbox = &pmb->mb;
2213
2214 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
2215 phba->link_state = LPFC_INIT_MBX_CMDS;
2216
2217 hbq_entry_index = 0;
2218 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
2219 phba->hbqs[hbqno].next_hbqPutIdx = 0;
2220 phba->hbqs[hbqno].hbqPutIdx = 0;
2221 phba->hbqs[hbqno].local_hbqGetIdx = 0;
2222 phba->hbqs[hbqno].entry_count =
92d7f7b0 2223 lpfc_hbq_defs[hbqno]->entry_count;
51ef4c26
JS
2224 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
2225 hbq_entry_index, pmb);
ed957684
JS
2226 hbq_entry_index += phba->hbqs[hbqno].entry_count;
2227
2228 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
2229 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
2230 mbxStatus <status>, ring <num> */
2231
2232 lpfc_printf_log(phba, KERN_ERR,
92d7f7b0 2233 LOG_SLI | LOG_VPORT,
e8b62011 2234 "1805 Adapter failed to init. "
ed957684 2235 "Data: x%x x%x x%x\n",
e8b62011 2236 pmbox->mbxCommand,
ed957684
JS
2237 pmbox->mbxStatus, hbqno);
2238
2239 phba->link_state = LPFC_HBA_ERROR;
2240 mempool_free(pmb, phba->mbox_mem_pool);
ed957684
JS
2241 return -ENXIO;
2242 }
2243 }
2244 phba->hbq_count = hbq_count;
2245
ed957684
JS
2246 mempool_free(pmb, phba->mbox_mem_pool);
2247
92d7f7b0
JS
2248 /* Initially populate or replenish the HBQs */
2249 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
2250 if (lpfc_sli_hbqbuf_init_hbqs(phba, hbqno))
2251 return -ENOMEM;
2252 }
ed957684
JS
2253 return 0;
2254}
2255
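/*
 * lpfc_do_config_port - Bring the port up in the requested SLI mode.
 * Restarts the board (at most twice), runs the pre-CONFIG_PORT
 * preparation, and issues a polled CONFIG_PORT mailbox command.  On
 * success the supported max_vpi is recorded; a negative error code is
 * returned on failure.
 */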
2256static int
2257lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode)
dea3101e 2258{
2259 LPFC_MBOXQ_t *pmb;
2260 uint32_t resetcount = 0, rc = 0, done = 0;
2261
2262 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2263 if (!pmb) {
2e0fef85 2264 phba->link_state = LPFC_HBA_ERROR;
dea3101e 2265 return -ENOMEM;
2266 }
2267
ed957684 2268 phba->sli_rev = sli_mode;
dea3101e 2269 while (resetcount < 2 && !done) {
2e0fef85 2270 spin_lock_irq(&phba->hbalock);
1c067a42 2271 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
2e0fef85 2272 spin_unlock_irq(&phba->hbalock);
92d7f7b0 2273 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
41415862 2274 lpfc_sli_brdrestart(phba);
dea3101e 2275 msleep(2500);
2276 rc = lpfc_sli_chipset_init(phba);
2277 if (rc)
2278 break;
2279
2e0fef85 2280 spin_lock_irq(&phba->hbalock);
1c067a42 2281 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 2282 spin_unlock_irq(&phba->hbalock);
dea3101e 2283 resetcount++;
2284
ed957684
JS
2285 /* Call pre CONFIG_PORT mailbox command initialization. A
2286 * value of 0 means the call was successful. Any other
2287 * nonzero value is a failure, but if ERESTART is returned,
2288 * the driver may reset the HBA and try again.
2289 */
dea3101e 2290 rc = lpfc_config_port_prep(phba);
2291 if (rc == -ERESTART) {
ed957684 2292 phba->link_state = LPFC_LINK_UNKNOWN;
dea3101e 2293 continue;
2294 } else if (rc) {
2295 break;
2296 }
2297
2e0fef85 2298 phba->link_state = LPFC_INIT_MBX_CMDS;
dea3101e 2299 lpfc_config_port(phba, pmb);
2300 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
ed957684 2301 if (rc != MBX_SUCCESS) {
dea3101e 2302 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 2303 "0442 Adapter failed to init, mbxCmd x%x "
92d7f7b0 2304 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
e8b62011 2305 pmb->mb.mbxCommand, pmb->mb.mbxStatus, 0);
2e0fef85 2306 spin_lock_irq(&phba->hbalock);
dea3101e 2307 phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
2e0fef85
JS
2308 spin_unlock_irq(&phba->hbalock);
2309 rc = -ENXIO;
ed957684
JS
2310 } else {
2311 done = 1;
92d7f7b0
JS
2312 phba->max_vpi = (phba->max_vpi &&
2313 pmb->mb.un.varCfgPort.gmv) != 0
2314 ? pmb->mb.un.varCfgPort.max_vpi
2315 : 0;
dea3101e 2316 }
2317 }
ed957684
JS
2318
2319 if (!done) {
2320 rc = -EINVAL;
2321 goto do_prep_failed;
2322 }
2323
2324 if ((pmb->mb.un.varCfgPort.sli_mode == 3) &&
92d7f7b0 2325 (!pmb->mb.un.varCfgPort.cMA)) {
ed957684
JS
2326 rc = -ENXIO;
2327 goto do_prep_failed;
2328 }
2329 return rc;
2330
92d7f7b0 2331do_prep_failed:
ed957684
JS
2332 mempool_free(pmb, phba->mbox_mem_pool);
2333 return rc;
2334}
2335
2336int
2337lpfc_sli_hba_setup(struct lpfc_hba *phba)
2338{
2339 uint32_t rc;
92d7f7b0 2340 int mode = 3;
ed957684
JS
2341
2342 switch (lpfc_sli_mode) {
2343 case 2:
78b2d852 2344 if (phba->cfg_enable_npiv) {
92d7f7b0 2345 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
e8b62011 2346 "1824 NPIV enabled: Override lpfc_sli_mode "
92d7f7b0 2347 "parameter (%d) to auto (0).\n",
e8b62011 2348 lpfc_sli_mode);
92d7f7b0
JS
2349 break;
2350 }
ed957684
JS
2351 mode = 2;
2352 break;
2353 case 0:
2354 case 3:
2355 break;
2356 default:
92d7f7b0 2357 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
e8b62011
JS
2358 "1819 Unrecognized lpfc_sli_mode "
2359 "parameter: %d.\n", lpfc_sli_mode);
ed957684
JS
2360
2361 break;
2362 }
2363
2364 rc = lpfc_do_config_port(phba, mode);
2365 if (rc && lpfc_sli_mode == 3)
92d7f7b0 2366 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
e8b62011
JS
2367 "1820 Unable to select SLI-3. "
2368 "Not supported by adapter.\n");
ed957684
JS
2369 if (rc && mode != 2)
2370 rc = lpfc_do_config_port(phba, 2);
2371 if (rc)
dea3101e 2372 goto lpfc_sli_hba_setup_error;
2373
ed957684
JS
2374 if (phba->sli_rev == 3) {
2375 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
2376 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
2377 phba->sli3_options |= LPFC_SLI3_ENABLED;
2378 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
2379
2380 } else {
2381 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
2382 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
92d7f7b0 2383 phba->sli3_options = 0;
ed957684
JS
2384 }
2385
2386 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
e8b62011
JS
2387 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
2388 phba->sli_rev, phba->max_vpi);
ed957684 2389 rc = lpfc_sli_ring_map(phba);
dea3101e 2390
2391 if (rc)
2392 goto lpfc_sli_hba_setup_error;
2393
92d7f7b0 2394 /* Init HBQs */
ed957684
JS
2395
2396 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2397 rc = lpfc_sli_hbq_setup(phba);
2398 if (rc)
2399 goto lpfc_sli_hba_setup_error;
2400 }
2401
dea3101e 2402 phba->sli.sli_flag |= LPFC_PROCESS_LA;
2403
2404 rc = lpfc_config_port_post(phba);
2405 if (rc)
2406 goto lpfc_sli_hba_setup_error;
2407
ed957684
JS
2408 return rc;
2409
92d7f7b0 2410lpfc_sli_hba_setup_error:
2e0fef85 2411 phba->link_state = LPFC_HBA_ERROR;
ed957684 2412 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
e8b62011 2413 "0445 Firmware initialization failed\n");
dea3101e 2414 return rc;
2415}
2416
dea3101e 2417/*! lpfc_mbox_timeout
2418 *
2419 * \pre
2420 * \post
2421 * \param hba Pointer to the per-HBA struct lpfc_hba structure
2422 * \param l1 Pointer to the driver's mailbox queue.
2423 * \return
2424 * void
2425 *
2426 * \b Description:
2427 *
2428 * This routine handles mailbox timeout events at timer interrupt context.
2429 */
2430void
2431lpfc_mbox_timeout(unsigned long ptr)
2432{
92d7f7b0 2433 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
dea3101e 2434 unsigned long iflag;
2e0fef85 2435 uint32_t tmo_posted;
dea3101e 2436
2e0fef85 2437 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
92d7f7b0 2438 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
2e0fef85
JS
2439 if (!tmo_posted)
2440 phba->pport->work_port_events |= WORKER_MBOX_TMO;
2441 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
2442
2443 if (!tmo_posted) {
92d7f7b0 2444 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 2445 if (phba->work_wait)
92d7f7b0
JS
2446 lpfc_worker_wake_up(phba);
2447 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 2448 }
dea3101e 2449}
2450
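/*
 * lpfc_mbox_timeout_handler - Worker-thread handler for a mailbox timeout.
 * Marks the link state unknown and clears LPFC_SLI2_ACTIVE so that
 * outstanding iocbs fail, aborts the FCP ring, and then takes the HBA
 * offline, restarts it, and brings it back online to recover.
 */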
2451void
2452lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
2453{
2e0fef85
JS
2454 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
2455 MAILBOX_t *mb = &pmbox->mb;
1dcb58e5
JS
2456 struct lpfc_sli *psli = &phba->sli;
2457 struct lpfc_sli_ring *pring;
dea3101e 2458
2e0fef85 2459 if (!(phba->pport->work_port_events & WORKER_MBOX_TMO)) {
dea3101e 2460 return;
2461 }
2462
dea3101e 2463 /* Mbox cmd <mbxCommand> timeout */
ed957684 2464 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
e8b62011 2465 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
92d7f7b0
JS
2466 mb->mbxCommand,
2467 phba->pport->port_state,
2468 phba->sli.sli_flag,
2469 phba->sli.mbox_active);
dea3101e 2470
1dcb58e5
JS
2471 /* Setting state unknown so lpfc_sli_abort_iocb_ring
2472 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
2473 * it to fail all outstanding SCSI IO.
2474 */
2e0fef85
JS
2475 spin_lock_irq(&phba->pport->work_port_lock);
2476 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
2477 spin_unlock_irq(&phba->pport->work_port_lock);
2478 spin_lock_irq(&phba->hbalock);
2479 phba->link_state = LPFC_LINK_UNKNOWN;
2480 phba->pport->fc_flag |= FC_ESTABLISH_LINK;
1dcb58e5 2481 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
2e0fef85 2482 spin_unlock_irq(&phba->hbalock);
1dcb58e5
JS
2483
2484 pring = &psli->ring[psli->fcp_ring];
2485 lpfc_sli_abort_iocb_ring(phba, pring);
2486
2487 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
e8b62011 2488 "0316 Resetting board due to mailbox timeout\n");
1dcb58e5
JS
2489 /*
2490 * lpfc_offline calls lpfc_sli_hba_down which will clean up
2491 * outstanding mailbox commands.
2492 */
2493 lpfc_offline_prep(phba);
2494 lpfc_offline(phba);
2495 lpfc_sli_brdrestart(phba);
2496 if (lpfc_online(phba) == 0) /* Initialize the HBA */
2497 mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
2498 lpfc_unblock_mgmt_io(phba);
dea3101e 2499 return;
2500}
2501
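/*
 * lpfc_sli_issue_mbox - Issue a mailbox command to the HBA.
 * With MBX_NOWAIT the command is posted (or queued and MBX_BUSY is
 * returned if another command is active) and completes through the
 * interrupt path; with MBX_POLL the routine spins on the mailbox owner
 * bit and returns the mailbox status directly.  Command data goes
 * through host SLIM when SLI2 is active, otherwise into HBA SLIM.
 */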
2502int
2e0fef85 2503lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
dea3101e 2504{
dea3101e 2505 MAILBOX_t *mb;
2e0fef85 2506 struct lpfc_sli *psli = &phba->sli;
dea3101e 2507 uint32_t status, evtctr;
2508 uint32_t ha_copy;
2509 int i;
2510 unsigned long drvr_flag = 0;
2511 volatile uint32_t word0, ldata;
2512 void __iomem *to_slim;
2513
ed957684 2514 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
92d7f7b0 2515 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
ed957684
JS
2516 if(!pmbox->vport) {
2517 lpfc_printf_log(phba, KERN_ERR,
92d7f7b0 2518 LOG_MBOX | LOG_VPORT,
e8b62011 2519 "1806 Mbox x%x failed. No vport\n",
ed957684
JS
2520 pmbox->mb.mbxCommand);
2521 dump_stack();
2522 return MBXERR_ERROR;
2523 }
2524 }
2525
92d7f7b0 2526
8d63f375
LV
2527 /* If the PCI channel is in offline state, do not post mbox. */
2528 if (unlikely(pci_channel_offline(phba->pcidev)))
2529 return MBX_NOT_FINISHED;
2530
2e0fef85 2531 spin_lock_irqsave(&phba->hbalock, drvr_flag);
dea3101e 2532 psli = &phba->sli;
92d7f7b0
JS
2533
2534
dea3101e 2535 mb = &pmbox->mb;
2536 status = MBX_SUCCESS;
2537
2e0fef85
JS
2538 if (phba->link_state == LPFC_HBA_ERROR) {
2539 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
41415862
JW
2540
2541 /* Mbox command <mbxCommand> cannot issue */
92d7f7b0 2542 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag)
2e0fef85 2543 return MBX_NOT_FINISHED;
41415862
JW
2544 }
2545
9290831f
JS
2546 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
2547 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
2e0fef85 2548 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
92d7f7b0 2549 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag)
2e0fef85 2550 return MBX_NOT_FINISHED;
9290831f
JS
2551 }
2552
dea3101e 2553 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
2554 /* Polling for an mbox command when another one is already active
2555 * is not allowed in SLI. Also, the driver must have established
2556 * SLI2 mode to queue and process multiple mbox commands.
2557 */
2558
2559 if (flag & MBX_POLL) {
2e0fef85 2560 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 2561
2562 /* Mbox command <mbxCommand> cannot issue */
92d7f7b0 2563 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2e0fef85 2564 return MBX_NOT_FINISHED;
dea3101e 2565 }
2566
2567 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
2e0fef85 2568 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 2569 /* Mbox command <mbxCommand> cannot issue */
92d7f7b0 2570 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2e0fef85 2571 return MBX_NOT_FINISHED;
dea3101e 2572 }
2573
2574 /* Handle STOP IOCB processing flag. This is only meaningful
2575 * if we are not polling for mbox completion.
2576 */
2577 if (flag & MBX_STOP_IOCB) {
2578 flag &= ~MBX_STOP_IOCB;
2579 /* Now flag each ring */
2580 for (i = 0; i < psli->num_rings; i++) {
2581 /* If the ring is active, flag it */
2582 if (psli->ring[i].cmdringaddr) {
2583 psli->ring[i].flag |=
2584 LPFC_STOP_IOCB_MBX;
2585 }
2586 }
2587 }
2588
2589 /* Another mailbox command is still being processed, queue this
2590 * command to be processed later.
2591 */
2592 lpfc_mbox_put(phba, pmbox);
2593
2594 /* Mbox cmd issue - BUSY */
ed957684 2595 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
e8b62011 2596 "(%d):0308 Mbox cmd issue - BUSY Data: "
92d7f7b0 2597 "x%x x%x x%x x%x\n",
92d7f7b0
JS
2598 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
2599 mb->mbxCommand, phba->pport->port_state,
2600 psli->sli_flag, flag);
dea3101e 2601
2602 psli->slistat.mbox_busy++;
2e0fef85 2603 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 2604
858c9f6c
JS
2605 if (pmbox->vport) {
2606 lpfc_debugfs_disc_trc(pmbox->vport,
2607 LPFC_DISC_TRC_MBOX_VPORT,
2608 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
2609 (uint32_t)mb->mbxCommand,
2610 mb->un.varWords[0], mb->un.varWords[1]);
2611 }
2612 else {
2613 lpfc_debugfs_disc_trc(phba->pport,
2614 LPFC_DISC_TRC_MBOX,
2615 "MBOX Bsy: cmd:x%x mb:x%x x%x",
2616 (uint32_t)mb->mbxCommand,
2617 mb->un.varWords[0], mb->un.varWords[1]);
2618 }
2619
2e0fef85 2620 return MBX_BUSY;
dea3101e 2621 }
2622
2623 /* Handle STOP IOCB processing flag. This is only meaningful
2624 * if we are not polling for mbox completion.
2625 */
2626 if (flag & MBX_STOP_IOCB) {
2627 flag &= ~MBX_STOP_IOCB;
2628 if (flag == MBX_NOWAIT) {
2629 /* Now flag each ring */
2630 for (i = 0; i < psli->num_rings; i++) {
2631 /* If the ring is active, flag it */
2632 if (psli->ring[i].cmdringaddr) {
2633 psli->ring[i].flag |=
2634 LPFC_STOP_IOCB_MBX;
2635 }
2636 }
2637 }
2638 }
2639
2640 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
2641
2642 /* If we are not polling, we MUST be in SLI2 mode */
2643 if (flag != MBX_POLL) {
41415862
JW
2644 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) &&
2645 (mb->mbxCommand != MBX_KILL_BOARD)) {
dea3101e 2646 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 2647 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 2648 /* Mbox command <mbxCommand> cannot issue */
92d7f7b0 2649 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2e0fef85 2650 return MBX_NOT_FINISHED;
dea3101e 2651 }
2652 /* timeout active mbox command */
a309a6b6
JS
2653 mod_timer(&psli->mbox_tmo, (jiffies +
2654 (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand))));
dea3101e 2655 }
2656
2657 /* Mailbox cmd <cmd> issue */
ed957684 2658 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
e8b62011 2659 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
92d7f7b0 2660 "x%x\n",
e8b62011 2661 pmbox->vport ? pmbox->vport->vpi : 0,
92d7f7b0
JS
2662 mb->mbxCommand, phba->pport->port_state,
2663 psli->sli_flag, flag);
dea3101e 2664
858c9f6c
JS
2665 if (mb->mbxCommand != MBX_HEARTBEAT) {
2666 if (pmbox->vport) {
2667 lpfc_debugfs_disc_trc(pmbox->vport,
2668 LPFC_DISC_TRC_MBOX_VPORT,
2669 "MBOX Send vport: cmd:x%x mb:x%x x%x",
2670 (uint32_t)mb->mbxCommand,
2671 mb->un.varWords[0], mb->un.varWords[1]);
2672 }
2673 else {
2674 lpfc_debugfs_disc_trc(phba->pport,
2675 LPFC_DISC_TRC_MBOX,
2676 "MBOX Send: cmd:x%x mb:x%x x%x",
2677 (uint32_t)mb->mbxCommand,
2678 mb->un.varWords[0], mb->un.varWords[1]);
2679 }
2680 }
2681
dea3101e 2682 psli->slistat.mbox_cmd++;
2683 evtctr = psli->slistat.mbox_event;
2684
2685 /* next set own bit for the adapter and copy over command word */
2686 mb->mbxOwner = OWN_CHIP;
2687
2688 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
dea3101e 2689 /* First copy command data to host SLIM area */
4cc2da1d 2690 lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, MAILBOX_CMD_SIZE);
dea3101e 2691 } else {
9290831f 2692 if (mb->mbxCommand == MBX_CONFIG_PORT) {
dea3101e 2693 /* copy command data into host mbox for cmpl */
4cc2da1d 2694 lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx,
92d7f7b0 2695 MAILBOX_CMD_SIZE);
dea3101e 2696 }
2697
2698 /* First copy mbox command data to HBA SLIM, skip past first
2699 word */
2700 to_slim = phba->MBslimaddr + sizeof (uint32_t);
2701 lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
2702 MAILBOX_CMD_SIZE - sizeof (uint32_t));
2703
2704 /* Next copy over first word, with mbxOwner set */
2705 ldata = *((volatile uint32_t *)mb);
2706 to_slim = phba->MBslimaddr;
2707 writel(ldata, to_slim);
2708 readl(to_slim); /* flush */
2709
2710 if (mb->mbxCommand == MBX_CONFIG_PORT) {
2711 /* switch over to host mailbox */
2712 psli->sli_flag |= LPFC_SLI2_ACTIVE;
2713 }
2714 }
2715
2716 wmb();
2717 /* interrupt board to do it right away */
2718 writel(CA_MBATT, phba->CAregaddr);
2719 readl(phba->CAregaddr); /* flush */
2720
2721 switch (flag) {
2722 case MBX_NOWAIT:
2723 /* Don't wait for it to finish, just return */
2724 psli->mbox_active = pmbox;
2725 break;
2726
2727 case MBX_POLL:
dea3101e 2728 psli->mbox_active = NULL;
2729 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2730 /* First read mbox status word */
4cc2da1d 2731 word0 = *((volatile uint32_t *)&phba->slim2p->mbx);
dea3101e 2732 word0 = le32_to_cpu(word0);
2733 } else {
2734 /* First read mbox status word */
2735 word0 = readl(phba->MBslimaddr);
2736 }
2737
2738 /* Read the HBA Host Attention Register */
2739 ha_copy = readl(phba->HAregaddr);
2740
a309a6b6
JS
2741 i = lpfc_mbox_tmo_val(phba, mb->mbxCommand);
2742 i *= 1000; /* Convert to ms */
2743
dea3101e 2744 /* Wait for command to complete */
41415862
JW
2745 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
2746 (!(ha_copy & HA_MBATT) &&
2e0fef85 2747 (phba->link_state > LPFC_WARM_START))) {
a309a6b6 2748 if (i-- <= 0) {
dea3101e 2749 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 2750 spin_unlock_irqrestore(&phba->hbalock,
dea3101e 2751 drvr_flag);
2e0fef85 2752 return MBX_NOT_FINISHED;
dea3101e 2753 }
2754
2755 /* Check if we took an mbox interrupt while we were
2756 polling */
2757 if (((word0 & OWN_CHIP) != OWN_CHIP)
2758 && (evtctr != psli->slistat.mbox_event))
2759 break;
2760
2e0fef85 2761 spin_unlock_irqrestore(&phba->hbalock,
dea3101e 2762 drvr_flag);
2763
1dcb58e5 2764 msleep(1);
dea3101e 2765
2e0fef85 2766 spin_lock_irqsave(&phba->hbalock, drvr_flag);
dea3101e 2767
2768 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2769 /* First copy command data */
4cc2da1d
JSEC
2770 word0 = *((volatile uint32_t *)
2771 &phba->slim2p->mbx);
dea3101e 2772 word0 = le32_to_cpu(word0);
2773 if (mb->mbxCommand == MBX_CONFIG_PORT) {
2774 MAILBOX_t *slimmb;
2775 volatile uint32_t slimword0;
2776 /* Check real SLIM for any errors */
2777 slimword0 = readl(phba->MBslimaddr);
2778 slimmb = (MAILBOX_t *) & slimword0;
2779 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
2780 && slimmb->mbxStatus) {
2781 psli->sli_flag &=
2782 ~LPFC_SLI2_ACTIVE;
2783 word0 = slimword0;
2784 }
2785 }
2786 } else {
2787 /* First copy command data */
2788 word0 = readl(phba->MBslimaddr);
2789 }
2790 /* Read the HBA Host Attention Register */
2791 ha_copy = readl(phba->HAregaddr);
2792 }
2793
2794 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
dea3101e 2795 /* copy results back to user */
4cc2da1d 2796 lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb,
92d7f7b0 2797 MAILBOX_CMD_SIZE);
dea3101e 2798 } else {
2799 /* First copy command data */
2800 lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
2801 MAILBOX_CMD_SIZE);
2802 if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
2803 pmbox->context2) {
92d7f7b0 2804 lpfc_memcpy_from_slim((void *)pmbox->context2,
dea3101e 2805 phba->MBslimaddr + DMP_RSP_OFFSET,
2806 mb->un.varDmp.word_cnt);
2807 }
2808 }
2809
2810 writel(HA_MBATT, phba->HAregaddr);
2811 readl(phba->HAregaddr); /* flush */
2812
2813 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2814 status = mb->mbxStatus;
2815 }
2816
2e0fef85
JS
2817 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2818 return status;
dea3101e 2819}
2820
92d7f7b0
JS
2821/*
2822 * Caller needs to hold lock.
2823 */
858c9f6c 2824static void
92d7f7b0 2825__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2e0fef85 2826 struct lpfc_iocbq *piocb)
dea3101e 2827{
2828 /* Insert the caller's iocb in the txq tail for later processing. */
2829 list_add_tail(&piocb->list, &pring->txq);
2830 pring->txq_cnt++;
dea3101e 2831}
2832
2833static struct lpfc_iocbq *
2834lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2e0fef85 2835 struct lpfc_iocbq **piocb)
dea3101e 2836{
2837 struct lpfc_iocbq * nextiocb;
2838
2839 nextiocb = lpfc_sli_ringtx_get(phba, pring);
2840 if (!nextiocb) {
2841 nextiocb = *piocb;
2842 *piocb = NULL;
2843 }
2844
2845 return nextiocb;
2846}
2847
92d7f7b0
JS
2848/*
2849 * Lockless version of lpfc_sli_issue_iocb.
2850 */
dea3101e 2851int
92d7f7b0 2852__lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
dea3101e 2853 struct lpfc_iocbq *piocb, uint32_t flag)
2854{
2855 struct lpfc_iocbq *nextiocb;
2856 IOCB_t *iocb;
2857
92d7f7b0
JS
2858 if (piocb->iocb_cmpl && (!piocb->vport) &&
2859 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
2860 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
2861 lpfc_printf_log(phba, KERN_ERR,
2862 LOG_SLI | LOG_VPORT,
e8b62011 2863 "1807 IOCB x%x failed. No vport\n",
92d7f7b0
JS
2864 piocb->iocb.ulpCommand);
2865 dump_stack();
2866 return IOCB_ERROR;
2867 }
2868
2869
8d63f375
LV
2870 /* If the PCI channel is in offline state, do not post iocbs. */
2871 if (unlikely(pci_channel_offline(phba->pcidev)))
2872 return IOCB_ERROR;
2873
dea3101e 2874 /*
2875 * We should never get an IOCB if we are in a < LINK_DOWN state
2876 */
2e0fef85 2877 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
dea3101e 2878 return IOCB_ERROR;
2879
2880 /*
2881 * Check to see if we are blocking IOCB processing because of an
2882 * outstanding mbox command.
2883 */
2884 if (unlikely(pring->flag & LPFC_STOP_IOCB_MBX))
2885 goto iocb_busy;
2886
2e0fef85 2887 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
dea3101e 2888 /*
2680eeaa 2889 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
dea3101e 2890 * can be issued if the link is not up.
2891 */
2892 switch (piocb->iocb.ulpCommand) {
2893 case CMD_QUE_RING_BUF_CN:
2894 case CMD_QUE_RING_BUF64_CN:
dea3101e 2895 /*
2896 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
2897 * completion, iocb_cmpl MUST be 0.
2898 */
2899 if (piocb->iocb_cmpl)
2900 piocb->iocb_cmpl = NULL;
2901 /*FALLTHROUGH*/
2902 case CMD_CREATE_XRI_CR:
2680eeaa
JS
2903 case CMD_CLOSE_XRI_CN:
2904 case CMD_CLOSE_XRI_CX:
dea3101e 2905 break;
2906 default:
2907 goto iocb_busy;
2908 }
2909
2910 /*
2911 * For FCP commands, we must be in a state where we can process link
2912 * attention events.
2913 */
2914 } else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
92d7f7b0 2915 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
dea3101e 2916 goto iocb_busy;
92d7f7b0 2917 }
dea3101e 2918
dea3101e 2919 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
2920 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
2921 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
2922
2923 if (iocb)
2924 lpfc_sli_update_ring(phba, pring);
2925 else
2926 lpfc_sli_update_full_ring(phba, pring);
2927
2928 if (!piocb)
2929 return IOCB_SUCCESS;
2930
2931 goto out_busy;
2932
2933 iocb_busy:
2934 pring->stats.iocb_cmd_delay++;
2935
2936 out_busy:
2937
2938 if (!(flag & SLI_IOCB_RET_IOCB)) {
92d7f7b0 2939 __lpfc_sli_ringtx_put(phba, pring, piocb);
dea3101e 2940 return IOCB_SUCCESS;
2941 }
2942
2943 return IOCB_BUSY;
2944}
2945
92d7f7b0
JS
2946
2947int
2948lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2949 struct lpfc_iocbq *piocb, uint32_t flag)
2950{
2951 unsigned long iflags;
2952 int rc;
2953
2954 spin_lock_irqsave(&phba->hbalock, iflags);
2955 rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag);
2956 spin_unlock_irqrestore(&phba->hbalock, iflags);
2957
2958 return rc;
2959}
2960
cf5bf97e
JW
2961static int
2962lpfc_extra_ring_setup( struct lpfc_hba *phba)
2963{
2964 struct lpfc_sli *psli;
2965 struct lpfc_sli_ring *pring;
2966
2967 psli = &phba->sli;
2968
2969 /* Adjust cmd/rsp ring iocb entries more evenly */
a4bc3379
JS
2970
2971 /* Take some away from the FCP ring */
cf5bf97e
JW
2972 pring = &psli->ring[psli->fcp_ring];
2973 pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
2974 pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
2975 pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
2976 pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
2977
a4bc3379
JS
2978 /* and give them to the extra ring */
2979 pring = &psli->ring[psli->extra_ring];
2980
cf5bf97e
JW
2981 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
2982 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
2983 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
2984 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
2985
2986 /* Setup default profile for this ring */
2987 pring->iotag_max = 4096;
2988 pring->num_mask = 1;
2989 pring->prt[0].profile = 0; /* Mask 0 */
a4bc3379
JS
2990 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
2991 pring->prt[0].type = phba->cfg_multi_ring_type;
cf5bf97e
JW
2992 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
2993 return 0;
2994}
2995
dea3101e 2996int
2997lpfc_sli_setup(struct lpfc_hba *phba)
2998{
ed957684 2999 int i, totiocbsize = 0;
dea3101e 3000 struct lpfc_sli *psli = &phba->sli;
3001 struct lpfc_sli_ring *pring;
3002
3003 psli->num_rings = MAX_CONFIGURED_RINGS;
3004 psli->sli_flag = 0;
3005 psli->fcp_ring = LPFC_FCP_RING;
3006 psli->next_ring = LPFC_FCP_NEXT_RING;
a4bc3379 3007 psli->extra_ring = LPFC_EXTRA_RING;
dea3101e 3008
604a3e30
JB
3009 psli->iocbq_lookup = NULL;
3010 psli->iocbq_lookup_len = 0;
3011 psli->last_iotag = 0;
3012
dea3101e 3013 for (i = 0; i < psli->num_rings; i++) {
3014 pring = &psli->ring[i];
3015 switch (i) {
3016 case LPFC_FCP_RING: /* ring 0 - FCP */
3017 /* numCiocb and numRiocb are used in config_port */
3018 pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
3019 pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
3020 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
3021 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
3022 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
3023 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
ed957684 3024 pring->sizeCiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
3025 SLI3_IOCB_CMD_SIZE :
3026 SLI2_IOCB_CMD_SIZE;
ed957684 3027 pring->sizeRiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
3028 SLI3_IOCB_RSP_SIZE :
3029 SLI2_IOCB_RSP_SIZE;
dea3101e 3030 pring->iotag_ctr = 0;
3031 pring->iotag_max =
92d7f7b0 3032 (phba->cfg_hba_queue_depth * 2);
dea3101e 3033 pring->fast_iotag = pring->iotag_max;
3034 pring->num_mask = 0;
3035 break;
a4bc3379 3036 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
dea3101e 3037 /* numCiocb and numRiocb are used in config_port */
3038 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
3039 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
ed957684 3040 pring->sizeCiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
3041 SLI3_IOCB_CMD_SIZE :
3042 SLI2_IOCB_CMD_SIZE;
ed957684 3043 pring->sizeRiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
3044 SLI3_IOCB_RSP_SIZE :
3045 SLI2_IOCB_RSP_SIZE;
2e0fef85 3046 pring->iotag_max = phba->cfg_hba_queue_depth;
dea3101e 3047 pring->num_mask = 0;
3048 break;
3049 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
3050 /* numCiocb and numRiocb are used in config_port */
3051 pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
3052 pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
ed957684 3053 pring->sizeCiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
3054 SLI3_IOCB_CMD_SIZE :
3055 SLI2_IOCB_CMD_SIZE;
ed957684 3056 pring->sizeRiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
3057 SLI3_IOCB_RSP_SIZE :
3058 SLI2_IOCB_RSP_SIZE;
dea3101e 3059 pring->fast_iotag = 0;
3060 pring->iotag_ctr = 0;
3061 pring->iotag_max = 4096;
3062 pring->num_mask = 4;
3063 pring->prt[0].profile = 0; /* Mask 0 */
3064 pring->prt[0].rctl = FC_ELS_REQ;
3065 pring->prt[0].type = FC_ELS_DATA;
3066 pring->prt[0].lpfc_sli_rcv_unsol_event =
92d7f7b0 3067 lpfc_els_unsol_event;
dea3101e 3068 pring->prt[1].profile = 0; /* Mask 1 */
3069 pring->prt[1].rctl = FC_ELS_RSP;
3070 pring->prt[1].type = FC_ELS_DATA;
3071 pring->prt[1].lpfc_sli_rcv_unsol_event =
92d7f7b0 3072 lpfc_els_unsol_event;
dea3101e 3073 pring->prt[2].profile = 0; /* Mask 2 */
3074 /* NameServer Inquiry */
3075 pring->prt[2].rctl = FC_UNSOL_CTL;
3076 /* NameServer */
3077 pring->prt[2].type = FC_COMMON_TRANSPORT_ULP;
3078 pring->prt[2].lpfc_sli_rcv_unsol_event =
92d7f7b0 3079 lpfc_ct_unsol_event;
dea3101e 3080 pring->prt[3].profile = 0; /* Mask 3 */
3081 /* NameServer response */
3082 pring->prt[3].rctl = FC_SOL_CTL;
3083 /* NameServer */
3084 pring->prt[3].type = FC_COMMON_TRANSPORT_ULP;
3085 pring->prt[3].lpfc_sli_rcv_unsol_event =
92d7f7b0 3086 lpfc_ct_unsol_event;
dea3101e 3087 break;
3088 }
ed957684 3089 totiocbsize += (pring->numCiocb * pring->sizeCiocb) +
92d7f7b0 3090 (pring->numRiocb * pring->sizeRiocb);
dea3101e 3091 }
ed957684 3092 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
dea3101e 3093 /* Too many cmd / rsp ring entries in SLI2 SLIM */
e8b62011
JS
3094 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
3095 "SLI2 SLIM Data: x%x x%lx\n",
3096 phba->brd_no, totiocbsize,
3097 (unsigned long) MAX_SLIM_IOCB_SIZE);
dea3101e 3098 }
cf5bf97e
JW
3099 if (phba->cfg_multi_ring_support == 2)
3100 lpfc_extra_ring_setup(phba);
dea3101e 3101
3102 return 0;
3103}
3104
3105int
2e0fef85 3106lpfc_sli_queue_setup(struct lpfc_hba *phba)
dea3101e 3107{
3108 struct lpfc_sli *psli;
3109 struct lpfc_sli_ring *pring;
604a3e30 3110 int i;
dea3101e 3111
3112 psli = &phba->sli;
2e0fef85 3113 spin_lock_irq(&phba->hbalock);
dea3101e 3114 INIT_LIST_HEAD(&psli->mboxq);
92d7f7b0 3115 INIT_LIST_HEAD(&psli->mboxq_cmpl);
dea3101e 3116 /* Initialize list headers for txq and txcmplq as double linked lists */
3117 for (i = 0; i < psli->num_rings; i++) {
3118 pring = &psli->ring[i];
3119 pring->ringno = i;
3120 pring->next_cmdidx = 0;
3121 pring->local_getidx = 0;
3122 pring->cmdidx = 0;
3123 INIT_LIST_HEAD(&pring->txq);
3124 INIT_LIST_HEAD(&pring->txcmplq);
3125 INIT_LIST_HEAD(&pring->iocb_continueq);
3126 INIT_LIST_HEAD(&pring->postbufq);
dea3101e 3127 }
2e0fef85
JS
3128 spin_unlock_irq(&phba->hbalock);
3129 return 1;
dea3101e 3130}
3131
92d7f7b0
JS
3132int
3133lpfc_sli_host_down(struct lpfc_vport *vport)
3134{
858c9f6c 3135 LIST_HEAD(completions);
92d7f7b0
JS
3136 struct lpfc_hba *phba = vport->phba;
3137 struct lpfc_sli *psli = &phba->sli;
3138 struct lpfc_sli_ring *pring;
3139 struct lpfc_iocbq *iocb, *next_iocb;
92d7f7b0
JS
3140 int i;
3141 unsigned long flags = 0;
3142 uint16_t prev_pring_flag;
3143
3144 lpfc_cleanup_discovery_resources(vport);
3145
3146 spin_lock_irqsave(&phba->hbalock, flags);
92d7f7b0
JS
3147 for (i = 0; i < psli->num_rings; i++) {
3148 pring = &psli->ring[i];
3149 prev_pring_flag = pring->flag;
858c9f6c
JS
3150 if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */
3151 pring->flag |= LPFC_DEFERRED_RING_EVENT;
92d7f7b0
JS
3152 /*
3153 * Error everything on the txq since these iocbs have not been
3154 * given to the FW yet.
3155 */
92d7f7b0
JS
3156 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
3157 if (iocb->vport != vport)
3158 continue;
858c9f6c 3159 list_move_tail(&iocb->list, &completions);
92d7f7b0 3160 pring->txq_cnt--;
92d7f7b0
JS
3161 }
3162
3163 /* Next issue ABTS for everything on the txcmplq */
3164 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
3165 list) {
3166 if (iocb->vport != vport)
3167 continue;
3168 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3169 }
3170
3171 pring->flag = prev_pring_flag;
3172 }
3173
3174 spin_unlock_irqrestore(&phba->hbalock, flags);
3175
858c9f6c
JS
3176 while (!list_empty(&completions)) {
3177 list_remove_head(&completions, iocb, struct lpfc_iocbq, list);
3178
3179 if (!iocb->iocb_cmpl)
3180 lpfc_sli_release_iocbq(phba, iocb);
3181 else {
3182 iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
3183 iocb->iocb.un.ulpWord[4] = IOERR_SLI_DOWN;
3184 (iocb->iocb_cmpl) (phba, iocb, iocb);
3185 }
3186 }
92d7f7b0
JS
3187 return 1;
3188}
3189
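/*
 * lpfc_sli_hba_down - Clean up SLI state when the HBA is taken down.
 * Every iocb still waiting on a txq is completed locally with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN, the mailbox timer is cancelled,
 * and the active plus any queued mailbox commands are completed with
 * MBX_NOT_FINISHED status.
 */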
dea3101e 3190int
2e0fef85 3191lpfc_sli_hba_down(struct lpfc_hba *phba)
dea3101e 3192{
2534ba75 3193 LIST_HEAD(completions);
2e0fef85 3194 struct lpfc_sli *psli = &phba->sli;
dea3101e 3195 struct lpfc_sli_ring *pring;
3196 LPFC_MBOXQ_t *pmb;
2534ba75
JS
3197 struct lpfc_iocbq *iocb;
3198 IOCB_t *cmd = NULL;
dea3101e 3199 int i;
3200 unsigned long flags = 0;
3201
dea3101e 3202 lpfc_hba_down_prep(phba);
3203
92d7f7b0
JS
3204 lpfc_fabric_abort_hba(phba);
3205
2e0fef85 3206 spin_lock_irqsave(&phba->hbalock, flags);
dea3101e 3207 for (i = 0; i < psli->num_rings; i++) {
3208 pring = &psli->ring[i];
858c9f6c
JS
3209 if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */
3210 pring->flag |= LPFC_DEFERRED_RING_EVENT;
dea3101e 3211
3212 /*
3213 * Error everything on the txq since these iocbs have not been
3214 * given to the FW yet.
3215 */
2534ba75 3216 list_splice_init(&pring->txq, &completions);
dea3101e 3217 pring->txq_cnt = 0;
3218
2534ba75 3219 }
2e0fef85 3220 spin_unlock_irqrestore(&phba->hbalock, flags);
dea3101e 3221
2534ba75 3222 while (!list_empty(&completions)) {
92d7f7b0 3223 list_remove_head(&completions, iocb, struct lpfc_iocbq, list);
2534ba75 3224 cmd = &iocb->iocb;
dea3101e 3225
2e0fef85
JS
3226 if (!iocb->iocb_cmpl)
3227 lpfc_sli_release_iocbq(phba, iocb);
3228 else {
2534ba75
JS
3229 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
3230 cmd->un.ulpWord[4] = IOERR_SLI_DOWN;
3231 (iocb->iocb_cmpl) (phba, iocb, iocb);
2e0fef85 3232 }
dea3101e 3233 }
3234
dea3101e 3235 /* Return any active mbox cmds */
3236 del_timer_sync(&psli->mbox_tmo);
92d7f7b0 3237 spin_lock_irqsave(&phba->hbalock, flags);
2e0fef85 3238
92d7f7b0 3239 spin_lock(&phba->pport->work_port_lock);
2e0fef85 3240 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
92d7f7b0 3241 spin_unlock(&phba->pport->work_port_lock);
2e0fef85 3242
92d7f7b0
JS
3243 if (psli->mbox_active) {
3244 list_add_tail(&psli->mbox_active->list, &completions);
2e0fef85 3245 psli->mbox_active = NULL;
2e0fef85 3246 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
dea3101e 3247 }
dea3101e 3248
92d7f7b0
JS
3249 /* Return any pending or completed mbox cmds */
3250 list_splice_init(&phba->sli.mboxq, &completions);
3251 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
3252 INIT_LIST_HEAD(&psli->mboxq);
3253 INIT_LIST_HEAD(&psli->mboxq_cmpl);
3254
3255 spin_unlock_irqrestore(&phba->hbalock, flags);
3256
3257 while (!list_empty(&completions)) {
3258 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
dea3101e 3259 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
3260 if (pmb->mbox_cmpl) {
dea3101e 3261 pmb->mbox_cmpl(phba,pmb);
dea3101e 3262 }
3263 }
dea3101e 3264 return 1;
3265}
3266
3267void
3268lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
3269{
3270 uint32_t *src = srcp;
3271 uint32_t *dest = destp;
3272 uint32_t ldata;
3273 int i;
3274
3275 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
3276 ldata = *src;
3277 ldata = le32_to_cpu(ldata);
3278 *dest = ldata;
3279 src++;
3280 dest++;
3281 }
3282}
3283
3284int
2e0fef85
JS
3285lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3286 struct lpfc_dmabuf *mp)
dea3101e 3287{
3288 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
3289 later */
2e0fef85 3290 spin_lock_irq(&phba->hbalock);
dea3101e 3291 list_add_tail(&mp->list, &pring->postbufq);
dea3101e 3292 pring->postbufq_cnt++;
2e0fef85 3293 spin_unlock_irq(&phba->hbalock);
dea3101e 3294 return 0;
3295}
3296
3297
3298struct lpfc_dmabuf *
3299lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3300 dma_addr_t phys)
3301{
3302 struct lpfc_dmabuf *mp, *next_mp;
3303 struct list_head *slp = &pring->postbufq;
3304
3305 /* Search postbufq, from the beginning, looking for a match on phys */
2e0fef85 3306 spin_lock_irq(&phba->hbalock);
dea3101e 3307 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
3308 if (mp->phys == phys) {
3309 list_del_init(&mp->list);
3310 pring->postbufq_cnt--;
2e0fef85 3311 spin_unlock_irq(&phba->hbalock);
dea3101e 3312 return mp;
3313 }
3314 }
3315
2e0fef85 3316 spin_unlock_irq(&phba->hbalock);
dea3101e 3317 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 3318 "0410 Cannot find virtual addr for mapped buf on "
dea3101e 3319 "ring %d Data x%llx x%p x%p x%x\n",
e8b62011 3320 pring->ringno, (unsigned long long)phys,
dea3101e 3321 slp->next, slp->prev, pring->postbufq_cnt);
3322 return NULL;
3323}
3324
3325static void
2e0fef85
JS
3326lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3327 struct lpfc_iocbq *rspiocb)
dea3101e 3328{
2e0fef85 3329 IOCB_t *irsp = &rspiocb->iocb;
2680eeaa 3330 uint16_t abort_iotag, abort_context;
92d7f7b0 3331 struct lpfc_iocbq *abort_iocb;
2680eeaa
JS
3332 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
3333
3334 abort_iocb = NULL;
2680eeaa
JS
3335
3336 if (irsp->ulpStatus) {
3337 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
3338 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
3339
2e0fef85 3340 spin_lock_irq(&phba->hbalock);
2680eeaa
JS
3341 if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag)
3342 abort_iocb = phba->sli.iocbq_lookup[abort_iotag];
3343
92d7f7b0 3344 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
e8b62011 3345 "0327 Cannot abort els iocb %p "
92d7f7b0
JS
3346 "with tag %x context %x, abort status %x, "
3347 "abort code %x\n",
e8b62011
JS
3348 abort_iocb, abort_iotag, abort_context,
3349 irsp->ulpStatus, irsp->un.ulpWord[4]);
2680eeaa
JS
3350
3351 /*
3352 * make sure we have the right iocbq before taking it
3353 * off the txcmplq and try to call completion routine.
3354 */
2e0fef85
JS
3355 if (!abort_iocb ||
3356 abort_iocb->iocb.ulpContext != abort_context ||
3357 (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
3358 spin_unlock_irq(&phba->hbalock);
3359 else {
92d7f7b0 3360 list_del_init(&abort_iocb->list);
2680eeaa 3361 pring->txcmplq_cnt--;
2e0fef85 3362 spin_unlock_irq(&phba->hbalock);
2680eeaa 3363
92d7f7b0
JS
3364 abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3365 abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
3366 abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
3367 (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
2680eeaa
JS
3368 }
3369 }
3370
604a3e30 3371 lpfc_sli_release_iocbq(phba, cmdiocb);
dea3101e 3372 return;
3373}
3374
92d7f7b0
JS
3375static void
3376lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3377 struct lpfc_iocbq *rspiocb)
3378{
3379 IOCB_t *irsp = &rspiocb->iocb;
3380
3381 /* ELS cmd tag <ulpIoTag> completes */
3382 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
e8b62011 3383 "0133 Ignoring ELS cmd tag x%x completion Data: "
92d7f7b0 3384 "x%x x%x x%x\n",
e8b62011 3385 irsp->ulpIoTag, irsp->ulpStatus,
92d7f7b0 3386 irsp->un.ulpWord[4], irsp->ulpTimeout);
858c9f6c
JS
3387 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
3388 lpfc_ct_free_iocb(phba, cmdiocb);
3389 else
3390 lpfc_els_free_iocb(phba, cmdiocb);
92d7f7b0
JS
3391 return;
3392}
3393
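/*
 * lpfc_sli_issue_abort_iotag - Issue an abort for the given command iocb.
 * Abort/close commands and iocbs already marked LPFC_DRIVER_ABORTED are
 * not aborted again.  While unloading, ELS ring iocbs only have their
 * completion handler replaced with lpfc_ignore_els_cmpl.  Otherwise an
 * ABORT_XRI_CN (link up) or CLOSE_XRI_CN (link down) iocb is issued with
 * lpfc_sli_abort_els_cmpl as its completion routine.
 */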
dea3101e 3394int
2e0fef85
JS
3395lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3396 struct lpfc_iocbq *cmdiocb)
dea3101e 3397{
2e0fef85 3398 struct lpfc_vport *vport = cmdiocb->vport;
0bd4ca25 3399 struct lpfc_iocbq *abtsiocbp;
dea3101e 3400 IOCB_t *icmd = NULL;
3401 IOCB_t *iabt = NULL;
07951076
JS
3402 int retval = IOCB_ERROR;
3403
92d7f7b0
JS
3404 /*
3405 * There are certain command types we don't want to abort. And we
3406 * don't want to abort commands that are already in the process of
3407 * being aborted.
07951076
JS
3408 */
3409 icmd = &cmdiocb->iocb;
2e0fef85 3410 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
92d7f7b0
JS
3411 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
3412 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
07951076
JS
3413 return 0;
3414
858c9f6c
JS
3415 /* If we're unloading, don't abort iocb on the ELS ring, but change the
3416 * callback so that nothing happens when it finishes.
07951076 3417 */
858c9f6c
JS
3418 if ((vport->load_flag & FC_UNLOADING) &&
3419 (pring->ringno == LPFC_ELS_RING)) {
92d7f7b0
JS
3420 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
3421 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
3422 else
3423 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
07951076 3424 goto abort_iotag_exit;
92d7f7b0 3425 }
dea3101e 3426
3427 /* issue ABTS for this IOCB based on iotag */
92d7f7b0 3428 abtsiocbp = __lpfc_sli_get_iocbq(phba);
dea3101e 3429 if (abtsiocbp == NULL)
3430 return 0;
dea3101e 3431
07951076
JS
3432 /* This signals the response to set the correct status
3433 * before calling the completion handler.
3434 */
3435 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
3436
dea3101e 3437 iabt = &abtsiocbp->iocb;
07951076
JS
3438 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
3439 iabt->un.acxri.abortContextTag = icmd->ulpContext;
3440 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
3441 iabt->ulpLe = 1;
3442 iabt->ulpClass = icmd->ulpClass;
dea3101e 3443
2e0fef85 3444 if (phba->link_state >= LPFC_LINK_UP)
07951076
JS
3445 iabt->ulpCommand = CMD_ABORT_XRI_CN;
3446 else
3447 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
dea3101e 3448
07951076 3449 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
5b8bd0c9 3450
e8b62011
JS
3451 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
3452 "0339 Abort xri x%x, original iotag x%x, "
3453 "abort cmd iotag x%x\n",
3454 iabt->un.acxri.abortContextTag,
3455 iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
92d7f7b0 3456 retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0);
dea3101e 3457
07951076 3458abort_iotag_exit:
2e0fef85
JS
3459 /*
3460 * Caller to this routine should check for IOCB_ERROR
3461 * and handle it properly. This routine no longer removes
3462 * iocb off txcmplq and call compl in case of IOCB_ERROR.
07951076 3463 */
2e0fef85 3464 return retval;
dea3101e 3465}
3466
3467static int
51ef4c26
JS
3468lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
3469 uint16_t tgt_id, uint64_t lun_id,
0bd4ca25 3470 lpfc_ctx_cmd ctx_cmd)
dea3101e 3471{
0bd4ca25
JSEC
3472 struct lpfc_scsi_buf *lpfc_cmd;
3473 struct scsi_cmnd *cmnd;
dea3101e 3474 int rc = 1;
3475
0bd4ca25
JSEC
3476 if (!(iocbq->iocb_flag & LPFC_IO_FCP))
3477 return rc;
3478
51ef4c26
JS
3479 if (iocbq->vport != vport)
3480 return rc;
3481
0bd4ca25
JSEC
3482 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
3483 cmnd = lpfc_cmd->pCmd;
3484
3485 if (cmnd == NULL)
dea3101e 3486 return rc;
3487
3488 switch (ctx_cmd) {
3489 case LPFC_CTX_LUN:
0bd4ca25
JSEC
3490 if ((cmnd->device->id == tgt_id) &&
3491 (cmnd->device->lun == lun_id))
dea3101e 3492 rc = 0;
3493 break;
3494 case LPFC_CTX_TGT:
0bd4ca25 3495 if (cmnd->device->id == tgt_id)
dea3101e 3496 rc = 0;
3497 break;
dea3101e 3498 case LPFC_CTX_HOST:
3499 rc = 0;
3500 break;
3501 default:
3502 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
3503 __FUNCTION__, ctx_cmd);
3504 break;
3505 }
3506
3507 return rc;
3508}
3509
3510int
51ef4c26
JS
3511lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
3512 lpfc_ctx_cmd ctx_cmd)
dea3101e 3513{
51ef4c26 3514 struct lpfc_hba *phba = vport->phba;
0bd4ca25
JSEC
3515 struct lpfc_iocbq *iocbq;
3516 int sum, i;
dea3101e 3517
0bd4ca25
JSEC
3518 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
3519 iocbq = phba->sli.iocbq_lookup[i];
dea3101e 3520
51ef4c26
JS
3521 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
3522 ctx_cmd) == 0)
0bd4ca25 3523 sum++;
dea3101e 3524 }
0bd4ca25 3525
dea3101e 3526 return sum;
3527}
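A minimal sketch of using the counting helper above, for example to report how many FCP commands remain outstanding against one LUN. The wrapper name and message text are assumptions; LOG_FCP is assumed to be an available log mask.

/* Illustrative sketch: count outstanding FCP iocbs for a single LUN. */
static int example_count_lun_iocbs(struct lpfc_vport *vport,
				   uint16_t tgt_id, uint64_t lun_id)
{
	int cnt;

	cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN);
	if (cnt)
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "%d FCP commands pending on tgt x%x "
				 "lun x%llx\n", cnt, tgt_id,
				 (unsigned long long)lun_id);
	return cnt;
}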
3528
5eb95af0 3529void
2e0fef85
JS
3530lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3531 struct lpfc_iocbq *rspiocb)
5eb95af0 3532{
604a3e30 3533 lpfc_sli_release_iocbq(phba, cmdiocb);
5eb95af0
JSEC
3534 return;
3535}
3536
dea3101e 3537int
51ef4c26
JS
3538lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
3539 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
dea3101e 3540{
51ef4c26 3541 struct lpfc_hba *phba = vport->phba;
0bd4ca25
JSEC
3542 struct lpfc_iocbq *iocbq;
3543 struct lpfc_iocbq *abtsiocb;
dea3101e 3544 IOCB_t *cmd = NULL;
dea3101e 3545 int errcnt = 0, ret_val = 0;
0bd4ca25 3546 int i;
dea3101e 3547
0bd4ca25
JSEC
3548 for (i = 1; i <= phba->sli.last_iotag; i++) {
3549 iocbq = phba->sli.iocbq_lookup[i];
dea3101e 3550
51ef4c26 3551 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
2e0fef85 3552 abort_cmd) != 0)
dea3101e 3553 continue;
3554
3555 /* issue ABTS for this IOCB based on iotag */
0bd4ca25 3556 abtsiocb = lpfc_sli_get_iocbq(phba);
dea3101e 3557 if (abtsiocb == NULL) {
3558 errcnt++;
3559 continue;
3560 }
dea3101e 3561
0bd4ca25 3562 cmd = &iocbq->iocb;
dea3101e 3563 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
3564 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
3565 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
3566 abtsiocb->iocb.ulpLe = 1;
3567 abtsiocb->iocb.ulpClass = cmd->ulpClass;
2e0fef85 3568 abtsiocb->vport = phba->pport;
dea3101e 3569
2e0fef85 3570 if (lpfc_is_link_up(phba))
dea3101e 3571 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
3572 else
3573 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
3574
5eb95af0
JSEC
3575 /* Setup callback routine and issue the command. */
3576 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
dea3101e 3577 ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0);
3578 if (ret_val == IOCB_ERROR) {
604a3e30 3579 lpfc_sli_release_iocbq(phba, abtsiocb);
dea3101e 3580 errcnt++;
3581 continue;
3582 }
3583 }
3584
3585 return errcnt;
3586}
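A hedged sketch of a reset-style caller for the abort walker above: issue aborts for every command outstanding to a target, then poll the counting helper until they drain or a bound expires. The wrapper name and the 10-second bound are assumptions.

/* Illustrative sketch: abort all FCP iocbs addressed to a target and wait,
 * bounded, for them to drain.  Returns 0 if drained, 1 otherwise.
 */
static int example_abort_target_iocbs(struct lpfc_vport *vport, uint16_t tgt_id)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
	int waited_ms;

	lpfc_sli_abort_iocb(vport, pring, tgt_id, 0, LPFC_CTX_TGT);

	for (waited_ms = 0; waited_ms < 10000; waited_ms += 100) {
		if (lpfc_sli_sum_iocb(vport, tgt_id, 0, LPFC_CTX_TGT) == 0)
			return 0;
		msleep(100);
	}
	return 1;
}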
3587
68876920
JSEC
3588static void
3589lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
3590 struct lpfc_iocbq *cmdiocbq,
3591 struct lpfc_iocbq *rspiocbq)
dea3101e 3592{
68876920
JSEC
3593 wait_queue_head_t *pdone_q;
3594 unsigned long iflags;
dea3101e 3595
2e0fef85 3596 spin_lock_irqsave(&phba->hbalock, iflags);
68876920
JSEC
3597 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
3598 if (cmdiocbq->context2 && rspiocbq)
3599 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
3600 &rspiocbq->iocb, sizeof(IOCB_t));
3601
3602 pdone_q = cmdiocbq->context_un.wait_queue;
68876920
JSEC
3603 if (pdone_q)
3604 wake_up(pdone_q);
858c9f6c 3605 spin_unlock_irqrestore(&phba->hbalock, iflags);
dea3101e 3606 return;
3607}
3608
68876920
JSEC
3609/*
3610 * Issue the caller's iocb and wait for its completion, but no longer than the
3611 * caller's timeout. Note that the LPFC_IO_WAKE bit in iocb_flag is cleared
3612 * before lpfc_sli_issue_iocb is called, since the wake routine sets it and by
3613 * definition this is a wait function.
3614 */
92d7f7b0 3615
dea3101e 3616int
2e0fef85
JS
3617lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
3618 struct lpfc_sli_ring *pring,
3619 struct lpfc_iocbq *piocb,
3620 struct lpfc_iocbq *prspiocbq,
68876920 3621 uint32_t timeout)
dea3101e 3622{
7259f0d0 3623 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
68876920
JSEC
3624 long timeleft, timeout_req = 0;
3625 int retval = IOCB_SUCCESS;
875fbdfe 3626 uint32_t creg_val;
dea3101e 3627
3628 /*
68876920
JSEC
3629	 * If the caller has provided a response iocbq buffer, then context2
3630	 * must be NULL or it is an error.
dea3101e 3631 */
68876920
JSEC
3632 if (prspiocbq) {
3633 if (piocb->context2)
3634 return IOCB_ERROR;
3635 piocb->context2 = prspiocbq;
dea3101e 3636 }
3637
68876920
JSEC
3638 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
3639 piocb->context_un.wait_queue = &done_q;
3640 piocb->iocb_flag &= ~LPFC_IO_WAKE;
dea3101e 3641
875fbdfe
JSEC
3642 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
3643 creg_val = readl(phba->HCregaddr);
3644 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
3645 writel(creg_val, phba->HCregaddr);
3646 readl(phba->HCregaddr); /* flush */
3647 }
3648
68876920
JSEC
3649 retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0);
3650 if (retval == IOCB_SUCCESS) {
3651 timeout_req = timeout * HZ;
68876920
JSEC
3652 timeleft = wait_event_timeout(done_q,
3653 piocb->iocb_flag & LPFC_IO_WAKE,
3654 timeout_req);
dea3101e 3655
7054a606
JS
3656 if (piocb->iocb_flag & LPFC_IO_WAKE) {
3657 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 3658 "0331 IOCB wake signaled\n");
7054a606 3659 } else if (timeleft == 0) {
68876920 3660 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011
JS
3661 "0338 IOCB wait timeout error - no "
3662 "wake response Data x%x\n", timeout);
68876920 3663 retval = IOCB_TIMEDOUT;
7054a606 3664 } else {
68876920 3665 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011
JS
3666 "0330 IOCB wake NOT set, "
3667 "Data x%x x%lx\n",
68876920
JSEC
3668 timeout, (timeleft / jiffies));
3669 retval = IOCB_TIMEDOUT;
dea3101e 3670 }
68876920
JSEC
3671 } else {
3672 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011
JS
3673 ":0332 IOCB wait issue failed, Data x%x\n",
3674 retval);
68876920 3675 retval = IOCB_ERROR;
dea3101e 3676 }
3677
875fbdfe
JSEC
3678 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
3679 creg_val = readl(phba->HCregaddr);
3680 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
3681 writel(creg_val, phba->HCregaddr);
3682 readl(phba->HCregaddr); /* flush */
3683 }
3684
68876920
JSEC
3685 if (prspiocbq)
3686 piocb->context2 = NULL;
3687
3688 piocb->context_un.wait_queue = NULL;
3689 piocb->iocb_cmpl = NULL;
dea3101e 3690 return retval;
3691}
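A hedged sketch of the synchronous interface above: allocate a response iocbq, issue a caller-prepared command on the ELS ring, wait up to 30 seconds (the timeout argument is in seconds, since the routine multiplies it by HZ), and map the three outcomes to errnos. How cmdiocb was built is left to the caller and not shown; on IOCB_TIMEDOUT the command may still complete later, so the sketch does not free it.

/* Illustrative sketch: synchronous iocb issue with a 30 second wait.
 * cmdiocb is assumed to be fully prepared by the caller.
 */
static int example_issue_iocb_sync(struct lpfc_hba *phba,
				   struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	struct lpfc_iocbq *rspiocb;
	int rc;

	rspiocb = lpfc_sli_get_iocbq(phba);
	if (!rspiocb)
		return -ENOMEM;

	rc = lpfc_sli_issue_iocb_wait(phba, pring, cmdiocb, rspiocb, 30);
	if (rc == IOCB_SUCCESS)
		/* a nonzero ulpStatus in the copied response is an error */
		rc = rspiocb->iocb.ulpStatus ? -EIO : 0;
	else if (rc == IOCB_TIMEDOUT)
		rc = -ETIMEDOUT;	/* cmdiocb may still complete later */
	else
		rc = -EIO;

	lpfc_sli_release_iocbq(phba, rspiocb);
	return rc;
}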
68876920 3692
dea3101e 3693int
2e0fef85 3694lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
dea3101e 3695 uint32_t timeout)
3696{
7259f0d0 3697 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
dea3101e 3698 int retval;
858c9f6c 3699 unsigned long flag;
dea3101e 3700
3701 /* The caller must leave context1 empty. */
92d7f7b0 3702 if (pmboxq->context1 != 0)
2e0fef85 3703 return MBX_NOT_FINISHED;
dea3101e 3704
3705	/* setup wake call as the mailbox completion callback */
3706 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
3707 /* setup context field to pass wait_queue pointer to wake function */
3708 pmboxq->context1 = &done_q;
3709
dea3101e 3710 /* now issue the command */
3711 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3712
3713 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
7054a606
JS
3714 wait_event_interruptible_timeout(done_q,
3715 pmboxq->mbox_flag & LPFC_MBX_WAKE,
3716 timeout * HZ);
3717
858c9f6c 3718 spin_lock_irqsave(&phba->hbalock, flag);
dea3101e 3719 pmboxq->context1 = NULL;
7054a606
JS
3720 /*
3721		 * If the LPFC_MBX_WAKE flag is set, the mailbox command completed;
3722		 * otherwise do not free the resources.
3723 */
3724 if (pmboxq->mbox_flag & LPFC_MBX_WAKE)
dea3101e 3725 retval = MBX_SUCCESS;
858c9f6c 3726 else {
7054a606 3727 retval = MBX_TIMEOUT;
858c9f6c
JS
3728 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3729 }
3730 spin_unlock_irqrestore(&phba->hbalock, flag);
dea3101e 3731 }
3732
dea3101e 3733 return retval;
3734}
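A minimal sketch of driving the synchronous mailbox path above with an already-prepared command; how the mailbox was built and who owns its buffers are assumptions left to the caller. Note from the code above that on MBX_TIMEOUT the command is handed to lpfc_sli_def_mbox_cmpl, so the caller must not reuse or free it.

/* Illustrative sketch: pmboxq is assumed to be fully prepared by the caller
 * (command built, vport/context2 set as needed, context1 left NULL).
 */
static int example_issue_mbox_sync(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	int rc;

	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
	if (rc == MBX_TIMEOUT)
		return -ETIMEDOUT;	/* pmboxq now owned by the default cmpl handler */
	if (rc != MBX_SUCCESS || pmboxq->mb.mbxStatus)
		return -EIO;
	return 0;
}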
3735
b4c02652
JS
3736int
3737lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)
3738{
2e0fef85 3739 struct lpfc_vport *vport = phba->pport;
b4c02652 3740 int i = 0;
ed957684 3741 uint32_t ha_copy;
b4c02652 3742
2e0fef85 3743 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !vport->stopped) {
b4c02652
JS
3744 if (i++ > LPFC_MBOX_TMO * 1000)
3745 return 1;
3746
ed957684
JS
3747 /*
3748 * Call lpfc_sli_handle_mb_event only if a mailbox cmd
3749 * did finish. This way we won't get the misleading
3750 * "Stray Mailbox Interrupt" message.
3751 */
3752 spin_lock_irq(&phba->hbalock);
3753 ha_copy = phba->work_ha;
3754 phba->work_ha &= ~HA_MBATT;
3755 spin_unlock_irq(&phba->hbalock);
3756
3757 if (ha_copy & HA_MBATT)
3758 if (lpfc_sli_handle_mb_event(phba) == 0)
3759 i = 0;
b4c02652
JS
3760
3761 msleep(1);
3762 }
3763
3764 return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0;
3765}
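A short hedged sketch of using the flush helper above on a teardown path; the wrapper name and message text are illustrative.

/* Illustrative sketch: ensure no mailbox command is still active before
 * continuing with port teardown.
 */
static void example_quiesce_mbox(struct lpfc_hba *phba)
{
	if (lpfc_sli_flush_mbox_queue(phba))
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"mailbox command still active after flush\n");
}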
3766
dea3101e 3767irqreturn_t
7d12e780 3768lpfc_intr_handler(int irq, void *dev_id)
dea3101e 3769{
2e0fef85 3770 struct lpfc_hba *phba;
dea3101e 3771 uint32_t ha_copy;
3772 uint32_t work_ha_copy;
3773 unsigned long status;
3774 int i;
3775 uint32_t control;
3776
92d7f7b0 3777 MAILBOX_t *mbox, *pmbox;
858c9f6c
JS
3778 struct lpfc_vport *vport;
3779 struct lpfc_nodelist *ndlp;
3780 struct lpfc_dmabuf *mp;
92d7f7b0
JS
3781 LPFC_MBOXQ_t *pmb;
3782 int rc;
3783
dea3101e 3784 /*
3785 * Get the driver's phba structure from the dev_id and
3786 * assume the HBA is not interrupting.
3787 */
3788 phba = (struct lpfc_hba *) dev_id;
3789
3790 if (unlikely(!phba))
3791 return IRQ_NONE;
3792
8d63f375
LV
3793 /* If the pci channel is offline, ignore all the interrupts. */
3794 if (unlikely(pci_channel_offline(phba->pcidev)))
3795 return IRQ_NONE;
3796
dea3101e 3797 phba->sli.slistat.sli_intr++;
3798
3799 /*
3800 * Call the HBA to see if it is interrupting. If not, don't claim
3801 * the interrupt
3802 */
3803
3804 /* Ignore all interrupts during initialization. */
2e0fef85 3805 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
dea3101e 3806 return IRQ_NONE;
3807
3808 /*
3809 * Read host attention register to determine interrupt source
3810 * Clear Attention Sources, except Error Attention (to
3811 * preserve status) and Link Attention
3812 */
2e0fef85 3813 spin_lock(&phba->hbalock);
dea3101e 3814 ha_copy = readl(phba->HAregaddr);
ebdbe65f
JS
3815 /* If somebody is waiting to handle an eratt don't process it
3816 * here. The brdkill function will do this.
3817 */
2e0fef85 3818 if (phba->link_flag & LS_IGNORE_ERATT)
ebdbe65f 3819 ha_copy &= ~HA_ERATT;
dea3101e 3820 writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
3821 readl(phba->HAregaddr); /* flush */
2e0fef85 3822 spin_unlock(&phba->hbalock);
dea3101e 3823
3824 if (unlikely(!ha_copy))
3825 return IRQ_NONE;
3826
3827 work_ha_copy = ha_copy & phba->work_ha_mask;
3828
3829 if (unlikely(work_ha_copy)) {
3830 if (work_ha_copy & HA_LATT) {
3831 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
3832 /*
3833 * Turn off Link Attention interrupts
3834 * until CLEAR_LA done
3835 */
2e0fef85 3836 spin_lock(&phba->hbalock);
dea3101e 3837 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
3838 control = readl(phba->HCregaddr);
3839 control &= ~HC_LAINT_ENA;
3840 writel(control, phba->HCregaddr);
3841 readl(phba->HCregaddr); /* flush */
2e0fef85 3842 spin_unlock(&phba->hbalock);
dea3101e 3843 }
3844 else
3845 work_ha_copy &= ~HA_LATT;
3846 }
3847
3848 if (work_ha_copy & ~(HA_ERATT|HA_MBATT|HA_LATT)) {
858c9f6c
JS
3849 /*
3850 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
3851 * the only slow ring.
3852 */
3853 status = (work_ha_copy &
3854 (HA_RXMASK << (4*LPFC_ELS_RING)));
3855 status >>= (4*LPFC_ELS_RING);
3856 if (status & HA_RXMASK) {
3857 spin_lock(&phba->hbalock);
3858 control = readl(phba->HCregaddr);
a58cbd52
JS
3859
3860 lpfc_debugfs_slow_ring_trc(phba,
3861 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
3862 control, status,
3863 (uint32_t)phba->sli.slistat.sli_intr);
3864
858c9f6c 3865 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
a58cbd52
JS
3866 lpfc_debugfs_slow_ring_trc(phba,
3867 "ISR Disable ring:"
3868 "pwork:x%x hawork:x%x wait:x%x",
3869 phba->work_ha, work_ha_copy,
3870 (uint32_t)((unsigned long)
3871 phba->work_wait));
3872
858c9f6c
JS
3873 control &=
3874 ~(HC_R0INT_ENA << LPFC_ELS_RING);
dea3101e 3875 writel(control, phba->HCregaddr);
3876 readl(phba->HCregaddr); /* flush */
dea3101e 3877 }
a58cbd52
JS
3878 else {
3879 lpfc_debugfs_slow_ring_trc(phba,
3880 "ISR slow ring: pwork:"
3881 "x%x hawork:x%x wait:x%x",
3882 phba->work_ha, work_ha_copy,
3883 (uint32_t)((unsigned long)
3884 phba->work_wait));
3885 }
858c9f6c 3886 spin_unlock(&phba->hbalock);
dea3101e 3887 }
3888 }
3889
3890 if (work_ha_copy & HA_ERATT) {
2e0fef85 3891 phba->link_state = LPFC_HBA_ERROR;
dea3101e 3892 /*
3893 * There was a link/board error. Read the
3894 * status register to retrieve the error event
3895 * and process it.
3896 */
3897 phba->sli.slistat.err_attn_event++;
3898 /* Save status info */
3899 phba->work_hs = readl(phba->HSregaddr);
3900 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
3901 phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
3902
3903 /* Clear Chip error bit */
3904 writel(HA_ERATT, phba->HAregaddr);
3905 readl(phba->HAregaddr); /* flush */
2e0fef85 3906 phba->pport->stopped = 1;
dea3101e 3907 }
3908
92d7f7b0
JS
3909 if ((work_ha_copy & HA_MBATT) &&
3910 (phba->sli.mbox_active)) {
3911 pmb = phba->sli.mbox_active;
3912 pmbox = &pmb->mb;
3913 mbox = &phba->slim2p->mbx;
858c9f6c 3914 vport = pmb->vport;
92d7f7b0
JS
3915
3916 /* First check out the status word */
3917 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
3918 if (pmbox->mbxOwner != OWN_HOST) {
3919 /*
3920 * Stray Mailbox Interrupt, mbxCommand <cmd>
3921 * mbxStatus <status>
3922 */
3923 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX |
3924 LOG_SLI,
e8b62011 3925 "(%d):0304 Stray Mailbox "
92d7f7b0
JS
3926 "Interrupt mbxCommand x%x "
3927 "mbxStatus x%x\n",
e8b62011 3928 (vport ? vport->vpi : 0),
92d7f7b0
JS
3929 pmbox->mbxCommand,
3930 pmbox->mbxStatus);
3931 }
858c9f6c 3932 phba->last_completion_time = jiffies;
92d7f7b0
JS
3933 del_timer_sync(&phba->sli.mbox_tmo);
3934
92d7f7b0
JS
3935 phba->sli.mbox_active = NULL;
3936 if (pmb->mbox_cmpl) {
3937 lpfc_sli_pcimem_bcopy(mbox, pmbox,
3938 MAILBOX_CMD_SIZE);
3939 }
858c9f6c
JS
3940 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
3941 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
3942
3943 lpfc_debugfs_disc_trc(vport,
3944 LPFC_DISC_TRC_MBOX_VPORT,
3945 "MBOX dflt rpi: : status:x%x rpi:x%x",
3946 (uint32_t)pmbox->mbxStatus,
3947 pmbox->un.varWords[0], 0);
3948
3949 if ( !pmbox->mbxStatus) {
3950 mp = (struct lpfc_dmabuf *)
3951 (pmb->context1);
3952 ndlp = (struct lpfc_nodelist *)
3953 pmb->context2;
3954
3955 /* Reg_LOGIN of dflt RPI was successful.
3956				 * Now let's get rid of the RPI using the
3957 * same mbox buffer.
3958 */
3959 lpfc_unreg_login(phba, vport->vpi,
3960 pmbox->un.varWords[0], pmb);
3961 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
3962 pmb->context1 = mp;
3963 pmb->context2 = ndlp;
3964 pmb->vport = vport;
3965 spin_lock(&phba->hbalock);
3966 phba->sli.sli_flag &=
3967 ~LPFC_SLI_MBOX_ACTIVE;
3968 spin_unlock(&phba->hbalock);
3969 goto send_current_mbox;
3970 }
3971 }
3972 spin_lock(&phba->pport->work_port_lock);
3973 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
3974 spin_unlock(&phba->pport->work_port_lock);
92d7f7b0
JS
3975 lpfc_mbox_cmpl_put(phba, pmb);
3976 }
3977 if ((work_ha_copy & HA_MBATT) &&
3978 (phba->sli.mbox_active == NULL)) {
3979send_next_mbox:
3980 spin_lock(&phba->hbalock);
3981 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3982 pmb = lpfc_mbox_get(phba);
3983 spin_unlock(&phba->hbalock);
858c9f6c 3984send_current_mbox:
92d7f7b0
JS
3985 /* Process next mailbox command if there is one */
3986 if (pmb != NULL) {
3987 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3988 if (rc == MBX_NOT_FINISHED) {
3989 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
3990 lpfc_mbox_cmpl_put(phba, pmb);
3991 goto send_next_mbox;
3992 }
3993 } else {
3994 /* Turn on IOCB processing */
3995 for (i = 0; i < phba->sli.num_rings; i++)
3996 lpfc_sli_turn_on_ring(phba, i);
3997 }
3998
3999 }
4000
2e0fef85 4001 spin_lock(&phba->hbalock);
dea3101e 4002 phba->work_ha |= work_ha_copy;
4003 if (phba->work_wait)
92d7f7b0 4004 lpfc_worker_wake_up(phba);
2e0fef85 4005 spin_unlock(&phba->hbalock);
dea3101e 4006 }
4007
4008 ha_copy &= ~(phba->work_ha_mask);
4009
4010 /*
4011 * Process all events on FCP ring. Take the optimized path for
4012 * FCP IO. Any other IO is slow path and is handled by
4013 * the worker thread.
4014 */
4015 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
4016 status >>= (4*LPFC_FCP_RING);
858c9f6c 4017 if (status & HA_RXMASK)
dea3101e 4018 lpfc_sli_handle_fast_ring_event(phba,
4019 &phba->sli.ring[LPFC_FCP_RING],
4020 status);
a4bc3379
JS
4021
4022 if (phba->cfg_multi_ring_support == 2) {
4023 /*
4024 * Process all events on extra ring. Take the optimized path
4025 * for extra ring IO. Any other IO is slow path and is handled
4026 * by the worker thread.
4027 */
4028 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
4029 status >>= (4*LPFC_EXTRA_RING);
858c9f6c 4030 if (status & HA_RXMASK) {
a4bc3379
JS
4031 lpfc_sli_handle_fast_ring_event(phba,
4032 &phba->sli.ring[LPFC_EXTRA_RING],
4033 status);
4034 }
4035 }
dea3101e 4036 return IRQ_HANDLED;
4037
4038} /* lpfc_intr_handler */
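For completeness, a hedged sketch of how a handler with the signature above is typically registered at attach time. The IRQF_SHARED flag and the "lpfc" name string are assumptions; the real registration lives in the driver's init code, not in this file.

/* Illustrative sketch: registering the interrupt handler defined above. */
static int example_setup_intr(struct lpfc_hba *phba)
{
	int rc;

	rc = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED,
			 "lpfc", phba);
	if (rc)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"request_irq failed, rc x%x\n", rc);
	return rc;
}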