/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
        LPFC_UNKNOWN_IOCB,
        LPFC_UNSOL_IOCB,
        LPFC_SOL_IOCB,
        LPFC_ABORT_IOCB
} lpfc_iocb_type;


/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
                                  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
                              uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
                                                         struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
                                      struct hbq_dmabuf *);
static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
                                    struct lpfc_cqe *);
static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *,
                                       int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *,
                                     uint32_t);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
        return &iocbq->iocb;
}

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
{
        union lpfc_wqe *temp_wqe;
        struct lpfc_register doorbell;
        uint32_t host_index;
        uint32_t idx;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return -ENOMEM;
        temp_wqe = q->qe[q->host_index].wqe;

        /* If the host has not yet processed the next entry then we are done */
        idx = ((q->host_index + 1) % q->entry_count);
        if (idx == q->hba_index) {
                q->WQ_overflow++;
                return -ENOMEM;
        }
        q->WQ_posted++;
        /* set consumption flag every once in a while */
        if (!((q->host_index + 1) % q->entry_repost))
                bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
        if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
                bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
        lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);

        /* Update the host index before invoking device */
        host_index = q->host_index;
        q->host_index = idx;

        /* Ring Doorbell */
        doorbell.word0 = 0;
        if (q->db_format == LPFC_DB_LIST_FORMAT) {
                bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
                bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
                bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
        } else if (q->db_format == LPFC_DB_RING_FORMAT) {
                bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
                bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
        } else {
                return -EINVAL;
        }
        writel(doorbell.word0, q->db_regaddr);

        return 0;
}
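
/*
 * Usage sketch (illustrative only, not part of the driver): callers take
 * the hbalock themselves, since lpfc_sli4_wq_put() does no locking of its
 * own.  The names "my_wq" and "my_wqe" are hypothetical:
 *
 *      unsigned long iflags;
 *      int rc;
 *
 *      spin_lock_irqsave(&phba->hbalock, iflags);
 *      rc = lpfc_sli4_wq_put(my_wq, &my_wqe);
 *      spin_unlock_irqrestore(&phba->hbalock, iflags);
 *
 * An rc of -ENOMEM means the ring was full (hba_index had not advanced),
 * so the caller should retry once completions have been processed.
 */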

/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
        uint32_t released = 0;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return 0;

        if (q->hba_index == index)
                return 0;
        do {
                q->hba_index = ((q->hba_index + 1) % q->entry_count);
                released++;
        } while (q->hba_index != index);
        return released;
}
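
/*
 * Worked example (illustrative only): with entry_count = 256,
 * hba_index = 250 and a completion reporting index = 3, the loop above
 * advances through 251, 252, 253, 254, 255, 0, 1, 2, 3 and returns
 * released = 9.
 */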

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
        struct lpfc_mqe *temp_mqe;
        struct lpfc_register doorbell;
        uint32_t host_index;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return -ENOMEM;
        temp_mqe = q->qe[q->host_index].mqe;

        /* If the host has not yet processed the next entry then we are done */
        if (((q->host_index + 1) % q->entry_count) == q->hba_index)
                return -ENOMEM;
        lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
        /* Save off the mailbox pointer for completion */
        q->phba->mbox = (MAILBOX_t *)temp_mqe;

        /* Update the host index before invoking device */
        host_index = q->host_index;
        q->host_index = ((q->host_index + 1) % q->entry_count);

        /* Ring Doorbell */
        doorbell.word0 = 0;
        bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
        bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
        return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
        /* sanity check on queue memory */
        if (unlikely(!q))
                return 0;

        /* Clear the mailbox pointer for completion */
        q->phba->mbox = NULL;
        q->hba_index = ((q->hba_index + 1) % q->entry_count);
        return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from.
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
        struct lpfc_eqe *eqe;
        uint32_t idx;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return NULL;
        eqe = q->qe[q->hba_index].eqe;

        /* If the next EQE is not valid then we are done */
        if (!bf_get_le32(lpfc_eqe_valid, eqe))
                return NULL;
        /* If the host has not yet processed the next entry then we are done */
        idx = ((q->hba_index + 1) % q->entry_count);
        if (idx == q->host_index)
                return NULL;

        q->hba_index = idx;
        return eqe;
}

/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts on.
 *
 **/
static inline void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
        struct lpfc_register doorbell;

        doorbell.word0 = 0;
        bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
               (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
}

/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each event queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of EQEs that were popped.
 **/
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
        uint32_t released = 0;
        struct lpfc_eqe *temp_eqe;
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return 0;

        /* while there are valid entries */
        while (q->hba_index != q->host_index) {
                temp_eqe = q->qe[q->host_index].eqe;
                bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
                released++;
                q->host_index = ((q->host_index + 1) % q->entry_count);
        }
        if (unlikely(released == 0 && !arm))
                return 0;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm) {
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
                bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        }
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
               (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
        /* PCI read to flush PCI pipeline on re-arming for INTx mode */
        if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
                readl(q->phba->sli4_hba.EQCQDBregaddr);
        return released;
}
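
/*
 * Usage sketch (illustrative only): an interrupt handler typically drains
 * an EQ by pairing lpfc_sli4_eq_get() with lpfc_sli4_eq_release(),
 * consuming valid entries until the get routine returns NULL, then handing
 * the processed entries back to the HBA with the queue rearmed.  The
 * consumer "my_handle_eqe" is a hypothetical name:
 *
 *      struct lpfc_eqe *eqe;
 *
 *      while ((eqe = lpfc_sli4_eq_get(eq)) != NULL)
 *              my_handle_eqe(phba, eqe);
 *      lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
 */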

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from.
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
        struct lpfc_cqe *cqe;
        uint32_t idx;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return NULL;

        /* If the next CQE is not valid then we are done */
        if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
                return NULL;
        /* If the host has not yet processed the next entry then we are done */
        idx = ((q->hba_index + 1) % q->entry_count);
        if (idx == q->host_index)
                return NULL;

        cqe = q->qe[q->hba_index].cqe;
        q->hba_index = idx;
        return cqe;
}

/**
 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
        uint32_t released = 0;
        struct lpfc_cqe *temp_qe;
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return 0;
        /* while there are valid entries */
        while (q->hba_index != q->host_index) {
                temp_qe = q->qe[q->host_index].cqe;
                bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
                released++;
                q->host_index = ((q->host_index + 1) % q->entry_count);
        }
        if (unlikely(released == 0 && !arm))
                return 0;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
        bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
               (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
        return released;
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the header queue.
 * @drqe: The Data Receive Queue Entry to put on the data queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entries. This function returns the index that the rqe was copied to if
 * successful. If no entries are available on @hq then this function will
 * return -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
                 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
        struct lpfc_rqe *temp_hrqe;
        struct lpfc_rqe *temp_drqe;
        struct lpfc_register doorbell;
        int put_index;

        /* sanity check on queue memory */
        if (unlikely(!hq) || unlikely(!dq))
                return -ENOMEM;
        put_index = hq->host_index;
        temp_hrqe = hq->qe[hq->host_index].rqe;
        temp_drqe = dq->qe[dq->host_index].rqe;

        if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
                return -EINVAL;
        if (hq->host_index != dq->host_index)
                return -EINVAL;
        /* If the host has not yet processed the next entry then we are done */
        if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
                return -EBUSY;
        lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
        lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

        /* Update the host index to point to the next slot */
        hq->host_index = ((hq->host_index + 1) % hq->entry_count);
        dq->host_index = ((dq->host_index + 1) % dq->entry_count);

        /* Ring The Header Receive Queue Doorbell */
        if (!(hq->host_index % hq->entry_repost)) {
                doorbell.word0 = 0;
                if (hq->db_format == LPFC_DB_RING_FORMAT) {
                        bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
                               hq->entry_repost);
                        bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
                } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
                        bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
                               hq->entry_repost);
                        bf_set(lpfc_rq_db_list_fm_index, &doorbell,
                               hq->host_index);
                        bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
                } else {
                        return -EINVAL;
                }
                writel(doorbell.word0, hq->db_regaddr);
        }
        return put_index;
}
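
/*
 * Usage sketch (illustrative only): header and data RQEs are posted in
 * lockstep, under the hbalock, with both queues at the same host_index.
 * "hrq"/"drq" and the DMA addresses are hypothetical; putPaddrLow/High
 * are the driver's address helpers:
 *
 *      struct lpfc_rqe hrqe, drqe;
 *      int put_index;
 *
 *      hrqe.address_lo = putPaddrLow(hbuf_phys);
 *      hrqe.address_hi = putPaddrHigh(hbuf_phys);
 *      drqe.address_lo = putPaddrLow(dbuf_phys);
 *      drqe.address_hi = putPaddrHigh(dbuf_phys);
 *      put_index = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
 *
 * A negative put_index (-EBUSY) means the queue pair was full.
 */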

/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
        /* sanity check on queue memory */
        if (unlikely(!hq) || unlikely(!dq))
                return 0;

        if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
                return 0;
        hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
        dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
        return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
                           pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
                           pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}
542
e59058c4 543/**
3621a710 544 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
e59058c4
JS
545 * @phba: Pointer to HBA context object.
546 *
547 * This function is called with hbalock held. This function
548 * allocates a new driver iocb object from the iocb pool. If the
549 * allocation is successful, it returns pointer to the newly
550 * allocated iocb object else it returns NULL.
551 **/
4f2e66c6 552struct lpfc_iocbq *
2e0fef85 553__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
0bd4ca25
JSEC
554{
555 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
556 struct lpfc_iocbq * iocbq = NULL;
557
558 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
2a9bf3d0
JS
559 if (iocbq)
560 phba->iocb_cnt++;
561 if (phba->iocb_cnt > phba->iocb_max)
562 phba->iocb_max = phba->iocb_cnt;
0bd4ca25
JSEC
563 return iocbq;
564}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglqs. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
        struct lpfc_sglq *sglq;

        sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
        phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
        return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglqs. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
        struct lpfc_sglq *sglq;

        sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
        return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
                    uint16_t xritag,
                    struct lpfc_node_rrq *rrq)
{
        struct lpfc_nodelist *ndlp = NULL;

        if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
                ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

        /* The target DID could have been swapped (cable swap)
         * so we should use the ndlp from the findnode if it is
         * available.
         */
        if ((!ndlp) && rrq->ndlp)
                ndlp = rrq->ndlp;

        if (!ndlp)
                goto out;

        if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) {
                rrq->send_rrq = 0;
                rrq->xritag = 0;
                rrq->rrq_stop_time = 0;
        }
out:
        mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if an RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function checks whether stop_time (ratov from setting the rrq
 * active) has been reached. If it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq; if the send_rrq flag is not set then it
 * will just call the routine to clear the rrq and free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long next_time;
        unsigned long iflags;
        LIST_HEAD(send_rrq);

        spin_lock_irqsave(&phba->hbalock, iflags);
        phba->hba_flag &= ~HBA_RRQ_ACTIVE;
        next_time = jiffies + HZ * (phba->fc_ratov + 1);
        list_for_each_entry_safe(rrq, nextrrq,
                                 &phba->active_rrq_list, list) {
                if (time_after(jiffies, rrq->rrq_stop_time))
                        list_move(&rrq->list, &send_rrq);
                else if (time_before(rrq->rrq_stop_time, next_time))
                        next_time = rrq->rrq_stop_time;
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        if (!list_empty(&phba->active_rrq_list))
                mod_timer(&phba->rrq_tmr, next_time);
        list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
                list_del(&rrq->list);
                if (!rrq->send_rrq)
                        /* this call will free the rrq */
                        lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
                else if (lpfc_send_rrq(phba, rrq)) {
                        /* if we send the rrq then the completion handler
                         * will clear the bit in the xribitmap.
                         */
                        lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
                }
        }
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long iflags;

        if (phba->sli_rev != LPFC_SLI_REV4)
                return NULL;
        spin_lock_irqsave(&phba->hbalock, iflags);
        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
                if (rrq->vport == vport && rrq->xritag == xri &&
                    rrq->nlp_DID == did) {
                        list_del(&rrq->list);
                        spin_unlock_irqrestore(&phba->hbalock, iflags);
                        return rrq;
                }
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQs for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_nodelist structure.
 *
 * If @ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If @ndlp is not NULL, then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long iflags;
        LIST_HEAD(rrq_list);

        if (phba->sli_rev != LPFC_SLI_REV4)
                return;
        if (!ndlp) {
                lpfc_sli4_vport_delete_els_xri_aborted(vport);
                lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
        }
        spin_lock_irqsave(&phba->hbalock, iflags);
        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
                if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
                        list_move(&rrq->list, &rrq_list);
        spin_unlock_irqrestore(&phba->hbalock, iflags);

        list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
                list_del(&rrq->list);
                lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
        }
}

/**
 * lpfc_cleanup_wt_rrqs - Remove all rrq's from the active list.
 * @phba: Pointer to HBA context object.
 *
 * Remove all rrqs from the phba->active_rrq_list and free them by
 * calling lpfc_clr_rrq_active.
 *
 **/
void
lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
{
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long next_time;
        unsigned long iflags;
        LIST_HEAD(rrq_list);

        if (phba->sli_rev != LPFC_SLI_REV4)
                return;
        spin_lock_irqsave(&phba->hbalock, iflags);
        phba->hba_flag &= ~HBA_RRQ_ACTIVE;
        next_time = jiffies + HZ * (phba->fc_ratov * 2);
        list_splice_init(&phba->active_rrq_list, &rrq_list);
        spin_unlock_irqrestore(&phba->hbalock, iflags);

        list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
                list_del(&rrq->list);
                lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
        }
        if (!list_empty(&phba->active_rrq_list))
                mod_timer(&phba->rrq_tmr, next_time);
}


/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: The xri in the bitmap to test.
 *
 * This function is called with hbalock held. This function
 * returns 0 = rrq not active for this xri
 *         1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                     uint16_t xritag)
{
        if (!ndlp)
                return 0;
        if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap))
                return 1;
        else
                return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns 0 rrq activated for this xri
 *         < 0 No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
        unsigned long iflags;
        struct lpfc_node_rrq *rrq;
        int empty;

        if (!ndlp)
                return -EINVAL;

        if (!phba->cfg_enable_rrq)
                return -EINVAL;

        spin_lock_irqsave(&phba->hbalock, iflags);
        if (phba->pport->load_flag & FC_UNLOADING) {
                phba->hba_flag &= ~HBA_RRQ_ACTIVE;
                goto out;
        }

        /*
         * set the active bit even if there is no mem available.
         */
        if (NLP_CHK_FREE_REQ(ndlp))
                goto out;

        if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
                goto out;

        if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
                goto out;

        spin_unlock_irqrestore(&phba->hbalock, iflags);
        rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
        if (!rrq) {
                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
                                " DID:0x%x Send:%d\n",
                                xritag, rxid, ndlp->nlp_DID, send_rrq);
                return -EINVAL;
        }
        rrq->send_rrq = send_rrq;
        rrq->xritag = xritag;
        rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
        rrq->ndlp = ndlp;
        rrq->nlp_DID = ndlp->nlp_DID;
        rrq->vport = ndlp->vport;
        rrq->rxid = rxid;
        spin_lock_irqsave(&phba->hbalock, iflags);
        empty = list_empty(&phba->active_rrq_list);
        list_add_tail(&rrq->list, &phba->active_rrq_list);
        phba->hba_flag |= HBA_RRQ_ACTIVE;
        if (empty)
                lpfc_worker_wake_up(phba);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return 0;
out:
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                        "2921 Can't set rrq active xri:0x%x rxid:0x%x"
                        " DID:0x%x Send:%d\n",
                        xritag, rxid, ndlp->nlp_DID, send_rrq);
        return -EINVAL;
}
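
/*
 * Usage sketch (illustrative only): lpfc_set_rrq_active() pairs with
 * lpfc_test_rrq_active() above.  An xri flagged here is skipped by
 * __lpfc_sli_get_sglq() below until lpfc_clr_rrq_active() clears it,
 * either from the RRQ completion path or from the rrq timer:
 *
 *      if (!lpfc_set_rrq_active(phba, ndlp, lxri, rxid, 1))
 *              the xri stays quarantined for roughly fc_ratov + 1
 *              seconds before the worker sends an RRQ ELS for it
 */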

/**
 * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with hbalock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then the allocation is successful and it returns a
 * pointer to the newly allocated sglq object, else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
        struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
        struct lpfc_sglq *sglq = NULL;
        struct lpfc_sglq *start_sglq = NULL;
        struct lpfc_scsi_buf *lpfc_cmd;
        struct lpfc_nodelist *ndlp;
        int found = 0;

        if (piocbq->iocb_flag & LPFC_IO_FCP) {
                lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
                ndlp = lpfc_cmd->rdata->pnode;
        } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
                   !(piocbq->iocb_flag & LPFC_IO_LIBDFC))
                ndlp = piocbq->context_un.ndlp;
        else if ((piocbq->iocb.ulpCommand == CMD_ELS_REQUEST64_CR) &&
                 (piocbq->iocb_flag & LPFC_IO_LIBDFC))
                ndlp = piocbq->context_un.ndlp;
        else
                ndlp = piocbq->context1;

        list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
        start_sglq = sglq;
        while (!found) {
                if (!sglq)
                        return NULL;
                if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_lxritag)) {
                        /* This xri has an rrq outstanding for this DID.
                         * put it back in the list and get another xri.
                         */
                        list_add_tail(&sglq->list, lpfc_sgl_list);
                        sglq = NULL;
                        list_remove_head(lpfc_sgl_list, sglq,
                                         struct lpfc_sglq, list);
                        if (sglq == start_sglq) {
                                sglq = NULL;
                                break;
                        } else
                                continue;
                }
                sglq->ndlp = ndlp;
                found = 1;
                phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
                sglq->state = SGL_ALLOCATED;
        }
        return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct lpfc_iocbq *iocbq = NULL;
        unsigned long iflags;

        spin_lock_irqsave(&phba->hbalock, iflags);
        iocbq = __lpfc_sli_get_iocbq(phba);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return iocbq;
}

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        struct lpfc_sglq *sglq;
        size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
        unsigned long iflag = 0;
        struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

        if (iocbq->sli4_xritag == NO_XRI)
                sglq = NULL;
        else
                sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

        if (sglq) {
                if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
                    (sglq->state != SGL_XRI_ABORTED)) {
                        spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
                                          iflag);
                        list_add(&sglq->list,
                                 &phba->sli4_hba.lpfc_abts_els_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.abts_sgl_list_lock, iflag);
                } else {
                        sglq->state = SGL_FREED;
                        sglq->ndlp = NULL;
                        list_add_tail(&sglq->list,
                                      &phba->sli4_hba.lpfc_sgl_list);

                        /* Check if TXQ queue needs to be serviced */
                        if (pring->txq_cnt)
                                lpfc_worker_wake_up(phba);
                }
        }

        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
        iocbq->sli4_lxritag = NO_XRI;
        iocbq->sli4_xritag = NO_XRI;
        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
        iocbq->sli4_xritag = NO_XRI;
        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        phba->__lpfc_sli_release_iocbq(phba, iocbq);
        phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        unsigned long iflags;

        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        spin_lock_irqsave(&phba->hbalock, iflags);
        __lpfc_sli_release_iocbq(phba, iocbq);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB, with the provided @ulpstatus and @ulpWord4 set in the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
                      uint32_t ulpstatus, uint32_t ulpWord4)
{
        struct lpfc_iocbq *piocb;

        while (!list_empty(iocblist)) {
                list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);

                if (!piocb->iocb_cmpl)
                        lpfc_sli_release_iocbq(phba, piocb);
                else {
                        piocb->iocb.ulpStatus = ulpstatus;
                        piocb->iocb.un.ulpWord[4] = ulpWord4;
                        (piocb->iocb_cmpl) (phba, piocb, piocb);
                }
        }
        return;
}

/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
        lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

        if (iocb_cmnd > CMD_MAX_IOCB_CMD)
                return 0;

        switch (iocb_cmnd) {
        case CMD_XMIT_SEQUENCE_CR:
        case CMD_XMIT_SEQUENCE_CX:
        case CMD_XMIT_BCAST_CN:
        case CMD_XMIT_BCAST_CX:
        case CMD_ELS_REQUEST_CR:
        case CMD_ELS_REQUEST_CX:
        case CMD_CREATE_XRI_CR:
        case CMD_CREATE_XRI_CX:
        case CMD_GET_RPI_CN:
        case CMD_XMIT_ELS_RSP_CX:
        case CMD_GET_RPI_CR:
        case CMD_FCP_IWRITE_CR:
        case CMD_FCP_IWRITE_CX:
        case CMD_FCP_IREAD_CR:
        case CMD_FCP_IREAD_CX:
        case CMD_FCP_ICMND_CR:
        case CMD_FCP_ICMND_CX:
        case CMD_FCP_TSEND_CX:
        case CMD_FCP_TRSP_CX:
        case CMD_FCP_TRECEIVE_CX:
        case CMD_FCP_AUTO_TRSP_CX:
        case CMD_ADAPTER_MSG:
        case CMD_ADAPTER_DUMP:
        case CMD_XMIT_SEQUENCE64_CR:
        case CMD_XMIT_SEQUENCE64_CX:
        case CMD_XMIT_BCAST64_CN:
        case CMD_XMIT_BCAST64_CX:
        case CMD_ELS_REQUEST64_CR:
        case CMD_ELS_REQUEST64_CX:
        case CMD_FCP_IWRITE64_CR:
        case CMD_FCP_IWRITE64_CX:
        case CMD_FCP_IREAD64_CR:
        case CMD_FCP_IREAD64_CX:
        case CMD_FCP_ICMND64_CR:
        case CMD_FCP_ICMND64_CX:
        case CMD_FCP_TSEND64_CX:
        case CMD_FCP_TRSP64_CX:
        case CMD_FCP_TRECEIVE64_CX:
        case CMD_GEN_REQUEST64_CR:
        case CMD_GEN_REQUEST64_CX:
        case CMD_XMIT_ELS_RSP64_CX:
        case DSSCMD_IWRITE64_CR:
        case DSSCMD_IWRITE64_CX:
        case DSSCMD_IREAD64_CR:
        case DSSCMD_IREAD64_CX:
                type = LPFC_SOL_IOCB;
                break;
        case CMD_ABORT_XRI_CN:
        case CMD_ABORT_XRI_CX:
        case CMD_CLOSE_XRI_CN:
        case CMD_CLOSE_XRI_CX:
        case CMD_XRI_ABORTED_CX:
        case CMD_ABORT_MXRI64_CN:
        case CMD_XMIT_BLS_RSP64_CX:
                type = LPFC_ABORT_IOCB;
                break;
        case CMD_RCV_SEQUENCE_CX:
        case CMD_RCV_ELS_REQ_CX:
        case CMD_RCV_SEQUENCE64_CX:
        case CMD_RCV_ELS_REQ64_CX:
        case CMD_ASYNC_STATUS:
        case CMD_IOCB_RCV_SEQ64_CX:
        case CMD_IOCB_RCV_ELS64_CX:
        case CMD_IOCB_RCV_CONT64_CX:
        case CMD_IOCB_RET_XRI64_CX:
                type = LPFC_UNSOL_IOCB;
                break;
        case CMD_IOCB_XMIT_MSEQ64_CR:
        case CMD_IOCB_XMIT_MSEQ64_CX:
        case CMD_IOCB_RCV_SEQ_LIST64_CX:
        case CMD_IOCB_RCV_ELS_LIST64_CX:
        case CMD_IOCB_CLOSE_EXTENDED_CN:
        case CMD_IOCB_ABORT_EXTENDED_CN:
        case CMD_IOCB_RET_HBQE64_CN:
        case CMD_IOCB_FCP_IBIDIR64_CR:
        case CMD_IOCB_FCP_IBIDIR64_CX:
        case CMD_IOCB_FCP_ITASKMGT64_CX:
        case CMD_IOCB_LOGENTRY_CN:
        case CMD_IOCB_LOGENTRY_ASYNC_CN:
                printk("%s - Unhandled SLI-3 Command x%x\n",
                       __func__, iocb_cmnd);
                type = LPFC_UNKNOWN_IOCB;
                break;
        default:
                type = LPFC_UNKNOWN_IOCB;
                break;
        }

        return type;
}
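
/*
 * Usage sketch (illustrative only): the ring event handlers dispatch on
 * the translated type of each completed entry, along the lines of:
 *
 *      switch (lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK)) {
 *      case LPFC_SOL_IOCB:     match against txcmplq, call iocb_cmpl
 *      case LPFC_UNSOL_IOCB:   hand the sequence to the ULP
 *      case LPFC_ABORT_IOCB:   finish the aborted command
 *      default:                log the unknown command
 *      }
 */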

/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli = &phba->sli;
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *pmbox;
        int i, rc, ret = 0;

        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb)
                return -ENOMEM;
        pmbox = &pmb->u.mb;
        phba->link_state = LPFC_INIT_MBX_CMDS;
        for (i = 0; i < psli->num_rings; i++) {
                lpfc_config_ring(phba, i, pmb);
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "0446 Adapter failed to init (%d), "
                                        "mbxCmd x%x CFG_RING, mbxStatus x%x, "
                                        "ring %d\n",
                                        rc, pmbox->mbxCommand,
                                        pmbox->mbxStatus, i);
                        phba->link_state = LPFC_HBA_ERROR;
                        ret = -ENXIO;
                        break;
                }
        }
        mempool_free(pmb, phba->mbox_mem_pool);
        return ret;
}

/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * This function is called with hbalock held. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                        struct lpfc_iocbq *piocb)
{
        list_add_tail(&piocb->list, &pring->txcmplq);
        piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
        pring->txcmplq_cnt++;
        if (pring->txcmplq_cnt > pring->txcmplq_max)
                pring->txcmplq_max = pring->txcmplq_cnt;

        if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
                if (!piocb->vport)
                        BUG();
                else
                        mod_timer(&piocb->vport->els_tmofunc,
                                  jiffies + HZ * (phba->fc_ratov << 1));
        }

        return 0;
}

/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        struct lpfc_iocbq *cmd_iocb;

        list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
        if (cmd_iocb != NULL)
                pring->txq_cnt--;
        return cmd_iocb;
}

/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * iocb slot returned by the function is not guaranteed to be available.
 * The function returns pointer to the next available iocb slot if there
 * is available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
        uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;

        if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
            (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
                pring->sli.sli3.next_cmdidx = 0;

        if (unlikely(pring->sli.sli3.local_getidx ==
                     pring->sli.sli3.next_cmdidx)) {

                pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

                if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                        "0315 Ring %d issue: portCmdGet %d "
                                        "is bigger than cmd ring %d\n",
                                        pring->ringno,
                                        pring->sli.sli3.local_getidx,
                                        max_cmd_idx);

                        phba->link_state = LPFC_HBA_ERROR;
                        /*
                         * All error attention handlers are posted to
                         * worker thread
                         */
                        phba->work_ha |= HA_ERATT;
                        phba->work_hs = HS_FFER3;

                        lpfc_worker_wake_up(phba);

                        return NULL;
                }

                if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
                        return NULL;
        }

        return lpfc_cmd_iocb(phba, pring);
}

/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        struct lpfc_iocbq **new_arr;
        struct lpfc_iocbq **old_arr;
        size_t new_len;
        struct lpfc_sli *psli = &phba->sli;
        uint16_t iotag;

        spin_lock_irq(&phba->hbalock);
        iotag = psli->last_iotag;
        if (++iotag < psli->iocbq_lookup_len) {
                psli->last_iotag = iotag;
                psli->iocbq_lookup[iotag] = iocbq;
                spin_unlock_irq(&phba->hbalock);
                iocbq->iotag = iotag;
                return iotag;
        } else if (psli->iocbq_lookup_len < (0xffff
                                             - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
                new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
                spin_unlock_irq(&phba->hbalock);
                new_arr = kzalloc(new_len * sizeof(struct lpfc_iocbq *),
                                  GFP_KERNEL);
                if (new_arr) {
                        spin_lock_irq(&phba->hbalock);
                        old_arr = psli->iocbq_lookup;
                        if (new_len <= psli->iocbq_lookup_len) {
                                /* highly improbable case */
                                kfree(new_arr);
                                iotag = psli->last_iotag;
                                if (++iotag < psli->iocbq_lookup_len) {
                                        psli->last_iotag = iotag;
                                        psli->iocbq_lookup[iotag] = iocbq;
                                        spin_unlock_irq(&phba->hbalock);
                                        iocbq->iotag = iotag;
                                        return iotag;
                                }
                                spin_unlock_irq(&phba->hbalock);
                                return 0;
                        }
                        if (psli->iocbq_lookup)
                                memcpy(new_arr, old_arr,
                                       ((psli->last_iotag + 1) *
                                        sizeof(struct lpfc_iocbq *)));
                        psli->iocbq_lookup = new_arr;
                        psli->iocbq_lookup_len = new_len;
                        psli->last_iotag = iotag;
                        psli->iocbq_lookup[iotag] = iocbq;
                        spin_unlock_irq(&phba->hbalock);
                        iocbq->iotag = iotag;
                        kfree(old_arr);
                        return iotag;
                }
        } else
                spin_unlock_irq(&phba->hbalock);

        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
                        "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
                        psli->last_iotag);

        return 0;
}
1482
e59058c4 1483/**
3621a710 1484 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
e59058c4
JS
1485 * @phba: Pointer to HBA context object.
1486 * @pring: Pointer to driver SLI ring object.
1487 * @iocb: Pointer to iocb slot in the ring.
1488 * @nextiocb: Pointer to driver iocb object which need to be
1489 * posted to firmware.
1490 *
1491 * This function is called with hbalock held to post a new iocb to
1492 * the firmware. This function copies the new iocb to ring iocb slot and
1493 * updates the ring pointers. It adds the new iocb to txcmplq if there is
1494 * a completion call back for this iocb else the function will free the
1495 * iocb object.
1496 **/
dea3101e 1497static void
1498lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1499 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
1500{
1501 /*
604a3e30 1502 * Set up an iotag
dea3101e 1503 */
604a3e30 1504 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
dea3101e 1505
e2a0a9d6 1506
a58cbd52
JS
1507 if (pring->ringno == LPFC_ELS_RING) {
1508 lpfc_debugfs_slow_ring_trc(phba,
1509 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
1510 *(((uint32_t *) &nextiocb->iocb) + 4),
1511 *(((uint32_t *) &nextiocb->iocb) + 6),
1512 *(((uint32_t *) &nextiocb->iocb) + 7));
1513 }
1514
dea3101e 1515 /*
1516 * Issue iocb command to adapter
1517 */
92d7f7b0 1518 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
dea3101e 1519 wmb();
1520 pring->stats.iocb_cmd++;
1521
1522 /*
1523 * If there is no completion routine to call, we can release the
1524 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
1525 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
1526 */
1527 if (nextiocb->iocb_cmpl)
1528 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
604a3e30 1529 else
2e0fef85 1530 __lpfc_sli_release_iocbq(phba, nextiocb);
dea3101e 1531
1532 /*
1533 * Let the HBA know what IOCB slot will be the next one the
1534 * driver will put a command into.
1535 */
7e56aa25
JS
1536 pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
1537 writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
dea3101e 1538}
1539
e59058c4 1540/**
3621a710 1541 * lpfc_sli_update_full_ring - Update the chip attention register
e59058c4
JS
1542 * @phba: Pointer to HBA context object.
1543 * @pring: Pointer to driver SLI ring object.
1544 *
1545 * The caller is not required to hold any lock for calling this function.
1546 * This function updates the chip attention bits for the ring to inform firmware
1547 * that there is pending work to be done for this ring and requests an
1548 * interrupt when space becomes available in the ring. This function is
1549 * called when the driver is unable to post more iocbs to the ring because
1550 * the ring is out of space.
1551 **/
dea3101e 1552static void
2e0fef85 1553lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea3101e 1554{
1555 int ringno = pring->ringno;
1556
1557 pring->flag |= LPFC_CALL_RING_AVAILABLE;
1558
1559 wmb();
1560
1561 /*
1562 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
1563 * The HBA will tell us when an IOCB entry is available.
1564 */
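 /*
  * Note: each ring owns a 4-bit nibble in the chip attention (CA)
  * register, so ring 0 writes (CA_R0ATT | CA_R0CE_REQ) << 0, ring 1
  * the same bits shifted left by 4, and so on.
  */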
1565 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
1566 readl(phba->CAregaddr); /* flush */
1567
1568 pring->stats.iocb_cmd_full++;
1569}
1570
e59058c4 1571/**
3621a710 1572 * lpfc_sli_update_ring - Update chip attention register
e59058c4
JS
1573 * @phba: Pointer to HBA context object.
1574 * @pring: Pointer to driver SLI ring object.
1575 *
1576 * This function updates the chip attention register bit for the
1577 * given ring to inform HBA that there is more work to be done
1578 * in this ring. The caller is not required to hold any lock.
1579 **/
dea3101e 1580static void
2e0fef85 1581lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea3101e 1582{
1583 int ringno = pring->ringno;
1584
1585 /*
1586 * Tell the HBA that there is work to do in this ring.
1587 */
34b02dcd
JS
1588 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
1589 wmb();
1590 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
1591 readl(phba->CAregaddr); /* flush */
1592 }
dea3101e 1593}
1594
e59058c4 1595/**
3621a710 1596 * lpfc_sli_resume_iocb - Process iocbs in the txq
e59058c4
JS
1597 * @phba: Pointer to HBA context object.
1598 * @pring: Pointer to driver SLI ring object.
1599 *
1600 * This function is called with hbalock held to post pending iocbs
1601 * in the txq to the firmware. This function is called when driver
1602 * detects space available in the ring.
1603 **/
dea3101e 1604static void
2e0fef85 1605lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea3101e 1606{
1607 IOCB_t *iocb;
1608 struct lpfc_iocbq *nextiocb;
1609
1610 /*
1611 * Check to see if:
1612 * (a) there is anything on the txq to send
1613 * (b) link is up
1614 * (c) link attention events can be processed (fcp ring only)
1615 * (d) IOCB processing is not blocked by the outstanding mbox command.
1616 */
1617 if (pring->txq_cnt &&
2e0fef85 1618 lpfc_is_link_up(phba) &&
dea3101e 1619 (pring->ringno != phba->sli.fcp_ring ||
0b727fea 1620 phba->sli.sli_flag & LPFC_PROCESS_LA)) {
dea3101e 1621
1622 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
1623 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
1624 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
1625
1626 if (iocb)
1627 lpfc_sli_update_ring(phba, pring);
1628 else
1629 lpfc_sli_update_full_ring(phba, pring);
1630 }
1631
1632 return;
1633}
1634
e59058c4 1635/**
3621a710 1636 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
e59058c4
JS
1637 * @phba: Pointer to HBA context object.
1638 * @hbqno: HBQ number.
1639 *
1640 * This function is called with hbalock held to get the next
1641 * available slot for the given HBQ. If a free slot is
1642 * available for the HBQ, it returns a pointer to the next available
1643 * HBQ entry; otherwise it returns NULL.
1644 **/
a6ababd2 1645static struct lpfc_hbq_entry *
ed957684
JS
1646lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1647{
1648 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1649
1650 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1651 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1652 hbqp->next_hbqPutIdx = 0;
1653
1654 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
92d7f7b0 1655 uint32_t raw_index = phba->hbq_get[hbqno];
ed957684
JS
1656 uint32_t getidx = le32_to_cpu(raw_index);
1657
1658 hbqp->local_hbqGetIdx = getidx;
1659
1660 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1661 lpfc_printf_log(phba, KERN_ERR,
92d7f7b0 1662 LOG_SLI | LOG_VPORT,
e8b62011 1663 "1802 HBQ %d: local_hbqGetIdx "
ed957684 1664 "%u is > than hbqp->entry_count %u\n",
e8b62011 1665 hbqno, hbqp->local_hbqGetIdx,
ed957684
JS
1666 hbqp->entry_count);
1667
1668 phba->link_state = LPFC_HBA_ERROR;
1669 return NULL;
1670 }
1671
1672 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
1673 return NULL;
1674 }
1675
51ef4c26
JS
1676 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
1677 hbqp->hbqPutIdx;
ed957684
JS
1678}
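/*
 * Example of the ring arithmetic above: with an entry_count of 256 the
 * put index advances 0, 1, ..., 255 and then wraps to 0; a slot is
 * handed out only while next_hbqPutIdx != local_hbqGetIdx, i.e. while
 * the HBQ is not full from the adapter's point of view.
 */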
1679
e59058c4 1680/**
3621a710 1681 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
e59058c4
JS
1682 * @phba: Pointer to HBA context object.
1683 *
1684 * This function is called with no lock held to free all the
1685 * hbq buffers while uninitializing the SLI interface. It also
1686 * frees the HBQ buffers returned by the firmware but not yet
1687 * processed by the upper layers.
1688 **/
ed957684
JS
1689void
1690lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
1691{
92d7f7b0
JS
1692 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
1693 struct hbq_dmabuf *hbq_buf;
3163f725 1694 unsigned long flags;
51ef4c26 1695 int i, hbq_count;
3163f725 1696 uint32_t hbqno;
ed957684 1697
51ef4c26 1698 hbq_count = lpfc_sli_hbq_count();
ed957684 1699 /* Return all memory used by all HBQs */
3163f725 1700 spin_lock_irqsave(&phba->hbalock, flags);
51ef4c26
JS
1701 for (i = 0; i < hbq_count; ++i) {
1702 list_for_each_entry_safe(dmabuf, next_dmabuf,
1703 &phba->hbqs[i].hbq_buffer_list, list) {
1704 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1705 list_del(&hbq_buf->dbuf.list);
1706 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
1707 }
a8adb832 1708 phba->hbqs[i].buffer_count = 0;
ed957684 1709 }
3163f725 1710 /* Return all HBQ buffers that are in flight */
3772a991
JS
1711 list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
1712 list) {
3163f725
JS
1713 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1714 list_del(&hbq_buf->dbuf.list);
1715 if (hbq_buf->tag == -1) {
1716 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
1717 (phba, hbq_buf);
1718 } else {
1719 hbqno = hbq_buf->tag >> 16;
1720 if (hbqno >= LPFC_MAX_HBQS)
1721 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
1722 (phba, hbq_buf);
1723 else
1724 (phba->hbqs[hbqno].hbq_free_buffer)(phba,
1725 hbq_buf);
1726 }
1727 }
1728
1729 /* Mark the HBQs not in use */
1730 phba->hbq_in_use = 0;
1731 spin_unlock_irqrestore(&phba->hbalock, flags);
ed957684
JS
1732}
1733
e59058c4 1734/**
3621a710 1735 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
e59058c4
JS
1736 * @phba: Pointer to HBA context object.
1737 * @hbqno: HBQ number.
1738 * @hbq_buf: Pointer to HBQ buffer.
1739 *
1740 * This function is called with the hbalock held to post a
1741 * hbq buffer to the firmware. If the function finds an empty
1742 * slot in the HBQ, it will post the buffer. The function returns
1743 * zero if it successfully posts the buffer, otherwise it
1744 * returns an error code.
1745 **/
3772a991 1746static int
ed957684 1747lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
92d7f7b0 1748 struct hbq_dmabuf *hbq_buf)
3772a991
JS
1749{
1750 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
1751}
1752
1753/**
1754 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
1755 * @phba: Pointer to HBA context object.
1756 * @hbqno: HBQ number.
1757 * @hbq_buf: Pointer to HBQ buffer.
1758 *
1759 * This function is called with the hbalock held to post a hbq buffer to the
1760 * firmware. If the function finds an empty slot in the HBQ, it will post the
1761 * buffer and place it on the hbq_buffer_list. The function returns zero if
1762 * it successfully posts the buffer, otherwise it returns an error.
1763 **/
1764static int
1765lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
1766 struct hbq_dmabuf *hbq_buf)
ed957684
JS
1767{
1768 struct lpfc_hbq_entry *hbqe;
92d7f7b0 1769 dma_addr_t physaddr = hbq_buf->dbuf.phys;
ed957684
JS
1770
1771 /* Get next HBQ entry slot to use */
1772 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
1773 if (hbqe) {
1774 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1775
92d7f7b0
JS
1776 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
1777 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
51ef4c26 1778 hbqe->bde.tus.f.bdeSize = hbq_buf->size;
ed957684 1779 hbqe->bde.tus.f.bdeFlags = 0;
92d7f7b0
JS
1780 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
1781 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
1782 /* Sync SLIM */
ed957684
JS
1783 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
1784 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
92d7f7b0 1785 /* flush */
ed957684 1786 readl(phba->hbq_put + hbqno);
51ef4c26 1787 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
3772a991
JS
1788 return 0;
1789 } else
1790 return -ENOMEM;
ed957684
JS
1791}
1792
4f774513
JS
1793/**
1794 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
1795 * @phba: Pointer to HBA context object.
1796 * @hbqno: HBQ number.
1797 * @hbq_buf: Pointer to HBQ buffer.
1798 *
1799 * This function is called with the hbalock held to post an RQE to the SLI4
1800 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
1801 * the hbq_buffer_list and return zero, otherwise it will return an error.
1802 **/
1803static int
1804lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
1805 struct hbq_dmabuf *hbq_buf)
1806{
1807 int rc;
1808 struct lpfc_rqe hrqe;
1809 struct lpfc_rqe drqe;
1810
1811 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
1812 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
1813 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
1814 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
1815 rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
1816 &hrqe, &drqe);
1817 if (rc < 0)
1818 return rc;
1819 hbq_buf->tag = rc;
1820 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
1821 return 0;
1822}
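/*
 * Note: SLI4 has no HBQ ring proper; the buffer is expressed as a
 * header/data RQE pair (hbuf seeds the header RQE, dbuf the data RQE)
 * and the RQE index returned by lpfc_sli4_rq_put() doubles as the
 * buffer tag.
 */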
1823
e59058c4 1824/* HBQ for ELS and CT traffic. */
92d7f7b0
JS
1825static struct lpfc_hbq_init lpfc_els_hbq = {
1826 .rn = 1,
def9c7a9 1827 .entry_count = 256,
92d7f7b0
JS
1828 .mask_count = 0,
1829 .profile = 0,
51ef4c26 1830 .ring_mask = (1 << LPFC_ELS_RING),
92d7f7b0 1831 .buffer_count = 0,
a257bf90
JS
1832 .init_count = 40,
1833 .add_count = 40,
92d7f7b0 1834};
ed957684 1835
e59058c4 1836/* HBQ for the extra ring if needed */
51ef4c26
JS
1837static struct lpfc_hbq_init lpfc_extra_hbq = {
1838 .rn = 1,
1839 .entry_count = 200,
1840 .mask_count = 0,
1841 .profile = 0,
1842 .ring_mask = (1 << LPFC_EXTRA_RING),
1843 .buffer_count = 0,
1844 .init_count = 0,
1845 .add_count = 5,
1846};
1847
e59058c4 1848/* Array of HBQs */
78b2d852 1849struct lpfc_hbq_init *lpfc_hbq_defs[] = {
92d7f7b0 1850 &lpfc_els_hbq,
51ef4c26 1851 &lpfc_extra_hbq,
92d7f7b0 1852};
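/*
 * A minimal sketch of how these profiles are consumed (hypothetical
 * caller; the real top-up decisions live in the unsolicited receive
 * path): replenish the ELS HBQ in add_count steps until it reaches
 * the profile's entry_count.
 *
 *	if (phba->hbqs[LPFC_ELS_HBQ].buffer_count <
 *	    lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count)
 *		lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
 */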
ed957684 1853
e59058c4 1854/**
3621a710 1855 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
e59058c4
JS
1856 * @phba: Pointer to HBA context object.
1857 * @hbqno: HBQ number.
1858 * @count: Number of HBQ buffers to be posted.
1859 *
d7c255b2
JS
1860 * This function is called with no lock held to post more hbq buffers to the
1861 * given HBQ. The function returns the number of HBQ buffers successfully
1862 * posted.
e59058c4 1863 **/
311464ec 1864static int
92d7f7b0 1865lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
ed957684 1866{
d7c255b2 1867 uint32_t i, posted = 0;
3163f725 1868 unsigned long flags;
92d7f7b0 1869 struct hbq_dmabuf *hbq_buffer;
d7c255b2 1870 LIST_HEAD(hbq_buf_list);
eafe1df9 1871 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
51ef4c26 1872 return 0;
51ef4c26 1873
d7c255b2
JS
1874 if ((phba->hbqs[hbqno].buffer_count + count) >
1875 lpfc_hbq_defs[hbqno]->entry_count)
1876 count = lpfc_hbq_defs[hbqno]->entry_count -
1877 phba->hbqs[hbqno].buffer_count;
1878 if (!count)
1879 return 0;
1880 /* Allocate HBQ entries */
1881 for (i = 0; i < count; i++) {
1882 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
1883 if (!hbq_buffer)
1884 break;
1885 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
1886 }
3163f725
JS
1887 /* Check whether HBQ is still in use */
1888 spin_lock_irqsave(&phba->hbalock, flags);
eafe1df9 1889 if (!phba->hbq_in_use)
d7c255b2
JS
1890 goto err;
1891 while (!list_empty(&hbq_buf_list)) {
1892 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1893 dbuf.list);
1894 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
1895 (hbqno << 16));
3772a991 1896 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
a8adb832 1897 phba->hbqs[hbqno].buffer_count++;
d7c255b2
JS
1898 posted++;
1899 } else
51ef4c26 1900 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
ed957684 1901 }
3163f725 1902 spin_unlock_irqrestore(&phba->hbalock, flags);
d7c255b2
JS
1903 return posted;
1904err:
eafe1df9 1905 spin_unlock_irqrestore(&phba->hbalock, flags);
d7c255b2
JS
1906 while (!list_empty(&hbq_buf_list)) {
1907 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1908 dbuf.list);
1909 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1910 }
1911 return 0;
ed957684
JS
1912}
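/*
 * Design note on the two-phase pattern above: buffers are allocated
 * with no lock held, then hbalock is taken only to post them, so the
 * allocator may sleep without holding the spinlock; any buffers left
 * unposted are handed back to hbq_free_buffer().
 */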
1913
e59058c4 1914/**
3621a710 1915 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
e59058c4
JS
1916 * @phba: Pointer to HBA context object.
1917 * @qno: HBQ number.
1918 *
1919 * This function posts more buffers to the HBQ. This function
d7c255b2
JS
1920 * is called with no lock held. The function returns the number of HBQ buffers
1921 * successfully posted.
e59058c4 1922 **/
92d7f7b0
JS
1923int
1924lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
ed957684 1925{
def9c7a9
JS
1926 if (phba->sli_rev == LPFC_SLI_REV4)
1927 return 0;
1928 else
1929 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1930 lpfc_hbq_defs[qno]->add_count);
92d7f7b0 1931}
ed957684 1932
e59058c4 1933/**
3621a710 1934 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
e59058c4
JS
1935 * @phba: Pointer to HBA context object.
1936 * @qno: HBQ queue number.
1937 *
1938 * This function is called from SLI initialization code path with
1939 * no lock held to post initial HBQ buffers to firmware. The
d7c255b2 1940 * function returns the number of HBQ buffers successfully posted.
e59058c4 1941 **/
a6ababd2 1942static int
92d7f7b0
JS
1943lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
1944{
def9c7a9
JS
1945 if (phba->sli_rev == LPFC_SLI_REV4)
1946 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
73d91e50 1947 lpfc_hbq_defs[qno]->entry_count);
def9c7a9
JS
1948 else
1949 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1950 lpfc_hbq_defs[qno]->init_count);
ed957684
JS
1951}
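/*
 * Design note: on SLI4 the queue is filled to the profile's full
 * entry_count up front, while SLI3 starts with the smaller init_count
 * and relies on lpfc_sli_hbqbuf_add_hbqs() to top the HBQ up as
 * buffers are consumed.
 */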
1952
3772a991
JS
1953/**
1954 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
1955 * @rb_list: Pointer to the hbq buffer list to take a buffer from.
1957 *
1958 * This function removes the first hbq buffer on an hbq list and returns a
1959 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
1960 **/
1961static struct hbq_dmabuf *
1962lpfc_sli_hbqbuf_get(struct list_head *rb_list)
1963{
1964 struct lpfc_dmabuf *d_buf;
1965
1966 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
1967 if (!d_buf)
1968 return NULL;
1969 return container_of(d_buf, struct hbq_dmabuf, dbuf);
1970}
1971
e59058c4 1972/**
3621a710 1973 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
e59058c4
JS
1974 * @phba: Pointer to HBA context object.
1975 * @tag: Tag of the hbq buffer.
1976 *
1977 * This function takes the hbalock internally while it searches
1978 * for the hbq buffer associated with the given tag in the hbq buffer
1979 * list. If it finds the hbq buffer, it returns the hbq_buffer; otherwise
1980 * it returns NULL.
1981 **/
a6ababd2 1982static struct hbq_dmabuf *
92d7f7b0 1983lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
ed957684 1984{
92d7f7b0
JS
1985 struct lpfc_dmabuf *d_buf;
1986 struct hbq_dmabuf *hbq_buf;
51ef4c26
JS
1987 uint32_t hbqno;
1988
1989 hbqno = tag >> 16;
a0a74e45 1990 if (hbqno >= LPFC_MAX_HBQS)
51ef4c26 1991 return NULL;
ed957684 1992
3772a991 1993 spin_lock_irq(&phba->hbalock);
51ef4c26 1994 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
92d7f7b0 1995 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
51ef4c26 1996 if (hbq_buf->tag == tag) {
3772a991 1997 spin_unlock_irq(&phba->hbalock);
92d7f7b0 1998 return hbq_buf;
ed957684
JS
1999 }
2000 }
3772a991 2001 spin_unlock_irq(&phba->hbalock);
92d7f7b0 2002 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
e8b62011 2003 "1803 Bad hbq tag. Data: x%x x%x\n",
a8adb832 2004 tag, phba->hbqs[tag >> 16].buffer_count);
92d7f7b0 2005 return NULL;
ed957684
JS
2006}
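/*
 * Note on the tag layout: lpfc_sli_hbqbuf_fill_hbqs() builds tags as
 * (buffer_count | (hbqno << 16)), so the upper 16 bits name the owning
 * HBQ and the lower 16 bits are a per-queue buffer index; that is why
 * tag >> 16 recovers the queue number above.
 */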
2007
e59058c4 2008/**
3621a710 2009 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
e59058c4
JS
2010 * @phba: Pointer to HBA context object.
2011 * @hbq_buffer: Pointer to HBQ buffer.
2012 *
2013 * This function is called with hbalock held. This function gives back
2014 * the hbq buffer to firmware. If the HBQ does not have space to
2015 * post the buffer, it will free the buffer.
2016 **/
ed957684 2017void
51ef4c26 2018lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
ed957684
JS
2019{
2020 uint32_t hbqno;
2021
51ef4c26
JS
2022 if (hbq_buffer) {
2023 hbqno = hbq_buffer->tag >> 16;
3772a991 2024 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
51ef4c26 2025 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
ed957684
JS
2026 }
2027}
2028
e59058c4 2029/**
3621a710 2030 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
e59058c4
JS
2031 * @mbxCommand: mailbox command code.
2032 *
2033 * This function is called by the mailbox event handler function to verify
2034 * that the completed mailbox command is a legitimate mailbox command. If the
2035 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2036 * and the mailbox event handler will take the HBA offline.
2037 **/
dea3101e 2038static int
2039lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2040{
2041 uint8_t ret;
2042
2043 switch (mbxCommand) {
2044 case MBX_LOAD_SM:
2045 case MBX_READ_NV:
2046 case MBX_WRITE_NV:
a8adb832 2047 case MBX_WRITE_VPARMS:
dea3101e 2048 case MBX_RUN_BIU_DIAG:
2049 case MBX_INIT_LINK:
2050 case MBX_DOWN_LINK:
2051 case MBX_CONFIG_LINK:
2052 case MBX_CONFIG_RING:
2053 case MBX_RESET_RING:
2054 case MBX_READ_CONFIG:
2055 case MBX_READ_RCONFIG:
2056 case MBX_READ_SPARM:
2057 case MBX_READ_STATUS:
2058 case MBX_READ_RPI:
2059 case MBX_READ_XRI:
2060 case MBX_READ_REV:
2061 case MBX_READ_LNK_STAT:
2062 case MBX_REG_LOGIN:
2063 case MBX_UNREG_LOGIN:
dea3101e 2064 case MBX_CLEAR_LA:
2065 case MBX_DUMP_MEMORY:
2066 case MBX_DUMP_CONTEXT:
2067 case MBX_RUN_DIAGS:
2068 case MBX_RESTART:
2069 case MBX_UPDATE_CFG:
2070 case MBX_DOWN_LOAD:
2071 case MBX_DEL_LD_ENTRY:
2072 case MBX_RUN_PROGRAM:
2073 case MBX_SET_MASK:
09372820 2074 case MBX_SET_VARIABLE:
dea3101e 2075 case MBX_UNREG_D_ID:
41415862 2076 case MBX_KILL_BOARD:
dea3101e 2077 case MBX_CONFIG_FARP:
41415862 2078 case MBX_BEACON:
dea3101e 2079 case MBX_LOAD_AREA:
2080 case MBX_RUN_BIU_DIAG64:
2081 case MBX_CONFIG_PORT:
2082 case MBX_READ_SPARM64:
2083 case MBX_READ_RPI64:
2084 case MBX_REG_LOGIN64:
76a95d75 2085 case MBX_READ_TOPOLOGY:
09372820 2086 case MBX_WRITE_WWN:
dea3101e 2087 case MBX_SET_DEBUG:
2088 case MBX_LOAD_EXP_ROM:
57127f15 2089 case MBX_ASYNCEVT_ENABLE:
92d7f7b0
JS
2090 case MBX_REG_VPI:
2091 case MBX_UNREG_VPI:
858c9f6c 2092 case MBX_HEARTBEAT:
84774a4d
JS
2093 case MBX_PORT_CAPABILITIES:
2094 case MBX_PORT_IOV_CONTROL:
04c68496
JS
2095 case MBX_SLI4_CONFIG:
2096 case MBX_SLI4_REQ_FTRS:
2097 case MBX_REG_FCFI:
2098 case MBX_UNREG_FCFI:
2099 case MBX_REG_VFI:
2100 case MBX_UNREG_VFI:
2101 case MBX_INIT_VPI:
2102 case MBX_INIT_VFI:
2103 case MBX_RESUME_RPI:
c7495937
JS
2104 case MBX_READ_EVENT_LOG_STATUS:
2105 case MBX_READ_EVENT_LOG:
dcf2a4e0
JS
2106 case MBX_SECURITY_MGMT:
2107 case MBX_AUTH_PORT:
940eb687 2108 case MBX_ACCESS_VDATA:
dea3101e 2109 ret = mbxCommand;
2110 break;
2111 default:
2112 ret = MBX_SHUTDOWN;
2113 break;
2114 }
2e0fef85 2115 return ret;
dea3101e 2116}
e59058c4
JS
2117
2118/**
3621a710 2119 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
e59058c4
JS
2120 * @phba: Pointer to HBA context object.
2121 * @pmboxq: Pointer to mailbox command.
2122 *
2123 * This is the completion handler function for mailbox commands issued from
2124 * the lpfc_sli_issue_mbox_wait function. This function is called by the
2125 * mailbox event handler function with no lock held. This function
2126 * will wake up the thread waiting on the wait queue pointed to by context1
2127 * of the mailbox.
2128 **/
04c68496 2129void
2e0fef85 2130lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
dea3101e 2131{
2132 wait_queue_head_t *pdone_q;
858c9f6c 2133 unsigned long drvr_flag;
dea3101e 2134
2135 /*
2136 * If pdone_q is empty, the driver thread gave up waiting and
2137 * continued running.
2138 */
7054a606 2139 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
858c9f6c 2140 spin_lock_irqsave(&phba->hbalock, drvr_flag);
dea3101e 2141 pdone_q = (wait_queue_head_t *) pmboxq->context1;
2142 if (pdone_q)
2143 wake_up_interruptible(pdone_q);
858c9f6c 2144 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 2145 return;
2146}
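/*
 * A minimal sketch of the waiter side this handler pairs with
 * (hypothetical names; the real logic lives in
 * lpfc_sli_issue_mbox_wait()): the issuer parks on a waitqueue stashed
 * in context1 and this completion sets LPFC_MBX_WAKE before waking it.
 *
 *	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
 *	pmboxq->context1 = &done_q;
 *	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
 *	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
 *	if (rc == MBX_BUSY || rc == MBX_SUCCESS)
 *		wait_event_interruptible_timeout(done_q,
 *			pmboxq->mbox_flag & LPFC_MBX_WAKE,
 *			timeout * HZ);
 */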
2147
e59058c4
JS
2148
2149/**
3621a710 2150 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
e59058c4
JS
2151 * @phba: Pointer to HBA context object.
2152 * @pmb: Pointer to mailbox object.
2153 *
2154 * This function is the default mailbox completion handler. It
2155 * frees the memory resources associated with the completed mailbox
2156 * command. If the completed command is a REG_LOGIN mailbox command,
2157 * this function will issue an UNREG_LOGIN to reclaim the RPI.
2158 **/
dea3101e 2159void
2e0fef85 2160lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
dea3101e 2161{
d439d286 2162 struct lpfc_vport *vport = pmb->vport;
dea3101e 2163 struct lpfc_dmabuf *mp;
d439d286 2164 struct lpfc_nodelist *ndlp;
5af5eee7 2165 struct Scsi_Host *shost;
04c68496 2166 uint16_t rpi, vpi;
7054a606
JS
2167 int rc;
2168
dea3101e 2169 mp = (struct lpfc_dmabuf *) (pmb->context1);
7054a606 2170
dea3101e 2171 if (mp) {
2172 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2173 kfree(mp);
2174 }
7054a606
JS
2175
2176 /*
2177 * If a REG_LOGIN succeeded after the node was destroyed or the node
2178 * is in re-discovery, the driver needs to clean up the RPI.
2179 */
2e0fef85 2180 if (!(phba->pport->load_flag & FC_UNLOADING) &&
04c68496
JS
2181 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2182 !pmb->u.mb.mbxStatus) {
2183 rpi = pmb->u.mb.un.varWords[0];
6d368e53 2184 vpi = pmb->u.mb.un.varRegLogin.vpi;
04c68496 2185 lpfc_unreg_login(phba, vpi, rpi, pmb);
92d7f7b0 2186 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
7054a606
JS
2187 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2188 if (rc != MBX_NOT_FINISHED)
2189 return;
2190 }
2191
695a814e
JS
2192 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2193 !(phba->pport->load_flag & FC_UNLOADING) &&
2194 !pmb->u.mb.mbxStatus) {
5af5eee7
JS
2195 shost = lpfc_shost_from_vport(vport);
2196 spin_lock_irq(shost->host_lock);
2197 vport->vpi_state |= LPFC_VPI_REGISTERED;
2198 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2199 spin_unlock_irq(shost->host_lock);
695a814e
JS
2200 }
2201
d439d286
JS
2202 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2203 ndlp = (struct lpfc_nodelist *)pmb->context2;
2204 lpfc_nlp_put(ndlp);
2205 pmb->context2 = NULL;
2206 }
2207
dcf2a4e0
JS
2208 /* Check security permission status on INIT_LINK mailbox command */
2209 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2210 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2211 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2212 "2860 SLI authentication is required "
2213 "for INIT_LINK but has not done yet\n");
2214
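 /* Non-embedded SLI4_CONFIG mailboxes carry extra SGE pages that a
  * plain mempool_free() would leak, hence the dedicated free routine
  * below.
  */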
04c68496
JS
2215 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2216 lpfc_sli4_mbox_cmd_free(phba, pmb);
2217 else
2218 mempool_free(pmb, phba->mbox_mem_pool);
dea3101e 2219}
2220
e59058c4 2221/**
3621a710 2222 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
e59058c4
JS
2223 * @phba: Pointer to HBA context object.
2224 *
2225 * This function is called with no lock held. This function processes all
2226 * the completed mailbox commands and gives them to upper layers. The interrupt
2227 * service routine processes the mailbox completion interrupt and adds completed
2228 * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
2229 * The worker thread calls lpfc_sli_handle_mb_event, which returns the
2230 * completed mailbox commands in the mboxq_cmpl queue to the upper layers. This
2231 * function returns the mailbox commands to the upper layer by calling the
2232 * completion handler function of each mailbox.
2233 **/
dea3101e 2234int
2e0fef85 2235lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
dea3101e 2236{
92d7f7b0 2237 MAILBOX_t *pmbox;
dea3101e 2238 LPFC_MBOXQ_t *pmb;
92d7f7b0
JS
2239 int rc;
2240 LIST_HEAD(cmplq);
dea3101e 2241
2242 phba->sli.slistat.mbox_event++;
2243
92d7f7b0
JS
2244 /* Get all completed mailbox buffers into the cmplq */
2245 spin_lock_irq(&phba->hbalock);
2246 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2247 spin_unlock_irq(&phba->hbalock);
dea3101e 2248
92d7f7b0
JS
2249 /* Get a Mailbox buffer to setup mailbox commands for callback */
2250 do {
2251 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2252 if (pmb == NULL)
2253 break;
2e0fef85 2254
04c68496 2255 pmbox = &pmb->u.mb;
dea3101e 2256
858c9f6c
JS
2257 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2258 if (pmb->vport) {
2259 lpfc_debugfs_disc_trc(pmb->vport,
2260 LPFC_DISC_TRC_MBOX_VPORT,
2261 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2262 (uint32_t)pmbox->mbxCommand,
2263 pmbox->un.varWords[0],
2264 pmbox->un.varWords[1]);
2265 }
2266 else {
2267 lpfc_debugfs_disc_trc(phba->pport,
2268 LPFC_DISC_TRC_MBOX,
2269 "MBOX cmpl: cmd:x%x mb:x%x x%x",
2270 (uint32_t)pmbox->mbxCommand,
2271 pmbox->un.varWords[0],
2272 pmbox->un.varWords[1]);
2273 }
2274 }
2275
dea3101e 2276 /*
2277 * A completion for an unknown mbox command is a fatal error.
2278 */
2279 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2280 MBX_SHUTDOWN) {
af901ca1 2281 /* Unknown mailbox command compl */
92d7f7b0 2282 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
e8b62011 2283 "(%d):0323 Unknown Mailbox command "
a183a15f 2284 "x%x (x%x/x%x) Cmpl\n",
92d7f7b0 2285 pmb->vport ? pmb->vport->vpi : 0,
04c68496 2286 pmbox->mbxCommand,
a183a15f
JS
2287 lpfc_sli_config_mbox_subsys_get(phba,
2288 pmb),
2289 lpfc_sli_config_mbox_opcode_get(phba,
2290 pmb));
2e0fef85 2291 phba->link_state = LPFC_HBA_ERROR;
dea3101e 2292 phba->work_hs = HS_FFER3;
2293 lpfc_handle_eratt(phba);
92d7f7b0 2294 continue;
dea3101e 2295 }
2296
dea3101e 2297 if (pmbox->mbxStatus) {
2298 phba->sli.slistat.mbox_stat_err++;
2299 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2300 /* Mbox cmd cmpl error - RETRYing */
92d7f7b0 2301 lpfc_printf_log(phba, KERN_INFO,
a183a15f
JS
2302 LOG_MBOX | LOG_SLI,
2303 "(%d):0305 Mbox cmd cmpl "
2304 "error - RETRYing Data: x%x "
2305 "(x%x/x%x) x%x x%x x%x\n",
2306 pmb->vport ? pmb->vport->vpi : 0,
2307 pmbox->mbxCommand,
2308 lpfc_sli_config_mbox_subsys_get(phba,
2309 pmb),
2310 lpfc_sli_config_mbox_opcode_get(phba,
2311 pmb),
2312 pmbox->mbxStatus,
2313 pmbox->un.varWords[0],
2314 pmb->vport->port_state);
dea3101e 2315 pmbox->mbxStatus = 0;
2316 pmbox->mbxOwner = OWN_HOST;
dea3101e 2317 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
04c68496 2318 if (rc != MBX_NOT_FINISHED)
92d7f7b0 2319 continue;
dea3101e 2320 }
2321 }
2322
2323 /* Mailbox cmd <cmd> Cmpl <cmpl> */
92d7f7b0 2324 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
a183a15f 2325 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
dea3101e 2326 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
92d7f7b0 2327 pmb->vport ? pmb->vport->vpi : 0,
dea3101e 2328 pmbox->mbxCommand,
a183a15f
JS
2329 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2330 lpfc_sli_config_mbox_opcode_get(phba, pmb),
dea3101e 2331 pmb->mbox_cmpl,
2332 *((uint32_t *) pmbox),
2333 pmbox->un.varWords[0],
2334 pmbox->un.varWords[1],
2335 pmbox->un.varWords[2],
2336 pmbox->un.varWords[3],
2337 pmbox->un.varWords[4],
2338 pmbox->un.varWords[5],
2339 pmbox->un.varWords[6],
2340 pmbox->un.varWords[7]);
2341
92d7f7b0 2342 if (pmb->mbox_cmpl)
dea3101e 2343 pmb->mbox_cmpl(phba,pmb);
92d7f7b0
JS
2344 } while (1);
2345 return 0;
2346}
dea3101e 2347
e59058c4 2348/**
3621a710 2349 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
e59058c4
JS
2350 * @phba: Pointer to HBA context object.
2351 * @pring: Pointer to driver SLI ring object.
2352 * @tag: buffer tag.
2353 *
2354 * This function is called with no lock held. When the QUE_BUFTAG_BIT
2355 * is set in the tag, the buffer was posted for a particular exchange and
2356 * the function returns the buffer without replacing it.
2357 * If the buffer is for unsolicited ELS or CT traffic, this function
2358 * returns the buffer and also posts another buffer to the firmware.
2359 **/
76bb24ef
JS
2360static struct lpfc_dmabuf *
2361lpfc_sli_get_buff(struct lpfc_hba *phba,
9f1e1b50
JS
2362 struct lpfc_sli_ring *pring,
2363 uint32_t tag)
76bb24ef 2364{
9f1e1b50
JS
2365 struct hbq_dmabuf *hbq_entry;
2366
76bb24ef
JS
2367 if (tag & QUE_BUFTAG_BIT)
2368 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
9f1e1b50
JS
2369 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2370 if (!hbq_entry)
2371 return NULL;
2372 return &hbq_entry->dbuf;
76bb24ef 2373}
57127f15 2374
3772a991
JS
2375/**
2376 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2377 * @phba: Pointer to HBA context object.
2378 * @pring: Pointer to driver SLI ring object.
2379 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2380 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2381 * @fch_type: the type for the first frame of the sequence.
2382 *
2383 * This function is called with no lock held. This function uses the r_ctl and
2384 * type of the received sequence to find the correct callback function to call
2385 * to process the sequence.
2386 **/
2387static int
2388lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2389 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2390 uint32_t fch_type)
2391{
2392 int i;
2393
2394 /* unSolicited Responses */
2395 if (pring->prt[0].profile) {
2396 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2397 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2398 saveq);
2399 return 1;
2400 }
2401 /* We must search, based on rctl / type, for the right routine */
2403 for (i = 0; i < pring->num_mask; i++) {
2404 if ((pring->prt[i].rctl == fch_r_ctl) &&
2405 (pring->prt[i].type == fch_type)) {
2406 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2407 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2408 (phba, pring, saveq);
2409 return 1;
2410 }
2411 }
2412 return 0;
2413}
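/*
 * Dispatch note: if prt[0] has a profile configured it receives every
 * unsolicited sequence; otherwise the (rctl, type) pair taken from the
 * first frame selects the matching pring->prt[] entry and its
 * lpfc_sli_rcv_unsol_event callback.
 */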
e59058c4
JS
2414
2415/**
3621a710 2416 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
e59058c4
JS
2417 * @phba: Pointer to HBA context object.
2418 * @pring: Pointer to driver SLI ring object.
2419 * @saveq: Pointer to the unsolicited iocb.
2420 *
2421 * This function is called with no lock held by the ring event handler
2422 * when there is an unsolicited iocb posted to the response ring by the
2423 * firmware. This function gets the buffer associated with the iocbs
2424 * and calls the event handler for the ring. This function handles both
2425 * qring buffers and hbq buffers.
2426 * When the function returns 1 the caller can free the iocb object; otherwise
2427 * upper layer functions will free the iocb objects.
2428 **/
dea3101e 2429static int
2430lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2431 struct lpfc_iocbq *saveq)
2432{
2433 IOCB_t * irsp;
2434 WORD5 * w5p;
2435 uint32_t Rctl, Type;
3772a991 2436 uint32_t match;
76bb24ef 2437 struct lpfc_iocbq *iocbq;
3163f725 2438 struct lpfc_dmabuf *dmzbuf;
dea3101e 2439
2440 match = 0;
2441 irsp = &(saveq->iocb);
57127f15
JS
2442
2443 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2444 if (pring->lpfc_sli_rcv_async_status)
2445 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2446 else
2447 lpfc_printf_log(phba,
2448 KERN_WARNING,
2449 LOG_SLI,
2450 "0316 Ring %d handler: unexpected "
2451 "ASYNC_STATUS iocb received evt_code "
2452 "0x%x\n",
2453 pring->ringno,
2454 irsp->un.asyncstat.evt_code);
2455 return 1;
2456 }
2457
3163f725
JS
2458 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2459 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2460 if (irsp->ulpBdeCount > 0) {
2461 dmzbuf = lpfc_sli_get_buff(phba, pring,
2462 irsp->un.ulpWord[3]);
2463 lpfc_in_buf_free(phba, dmzbuf);
2464 }
2465
2466 if (irsp->ulpBdeCount > 1) {
2467 dmzbuf = lpfc_sli_get_buff(phba, pring,
2468 irsp->unsli3.sli3Words[3]);
2469 lpfc_in_buf_free(phba, dmzbuf);
2470 }
2471
2472 if (irsp->ulpBdeCount > 2) {
2473 dmzbuf = lpfc_sli_get_buff(phba, pring,
2474 irsp->unsli3.sli3Words[7]);
2475 lpfc_in_buf_free(phba, dmzbuf);
2476 }
2477
2478 return 1;
2479 }
2480
92d7f7b0 2481 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
76bb24ef
JS
2482 if (irsp->ulpBdeCount != 0) {
2483 saveq->context2 = lpfc_sli_get_buff(phba, pring,
2484 irsp->un.ulpWord[3]);
2485 if (!saveq->context2)
2486 lpfc_printf_log(phba,
2487 KERN_ERR,
2488 LOG_SLI,
2489 "0341 Ring %d Cannot find buffer for "
2490 "an unsolicited iocb. tag 0x%x\n",
2491 pring->ringno,
2492 irsp->un.ulpWord[3]);
76bb24ef
JS
2493 }
2494 if (irsp->ulpBdeCount == 2) {
2495 saveq->context3 = lpfc_sli_get_buff(phba, pring,
2496 irsp->unsli3.sli3Words[7]);
2497 if (!saveq->context3)
2498 lpfc_printf_log(phba,
2499 KERN_ERR,
2500 LOG_SLI,
2501 "0342 Ring %d Cannot find buffer for an"
2502 " unsolicited iocb. tag 0x%x\n",
2503 pring->ringno,
2504 irsp->unsli3.sli3Words[7]);
2505 }
2506 list_for_each_entry(iocbq, &saveq->list, list) {
76bb24ef 2507 irsp = &(iocbq->iocb);
76bb24ef
JS
2508 if (irsp->ulpBdeCount != 0) {
2509 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
2510 irsp->un.ulpWord[3]);
9c2face6 2511 if (!iocbq->context2)
76bb24ef
JS
2512 lpfc_printf_log(phba,
2513 KERN_ERR,
2514 LOG_SLI,
2515 "0343 Ring %d Cannot find "
2516 "buffer for an unsolicited iocb"
2517 ". tag 0x%x\n", pring->ringno,
92d7f7b0 2518 irsp->un.ulpWord[3]);
76bb24ef
JS
2519 }
2520 if (irsp->ulpBdeCount == 2) {
2521 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
51ef4c26 2522 irsp->unsli3.sli3Words[7]);
9c2face6 2523 if (!iocbq->context3)
76bb24ef
JS
2524 lpfc_printf_log(phba,
2525 KERN_ERR,
2526 LOG_SLI,
2527 "0344 Ring %d Cannot find "
2528 "buffer for an unsolicited "
2529 "iocb. tag 0x%x\n",
2530 pring->ringno,
2531 irsp->unsli3.sli3Words[7]);
2532 }
2533 }
92d7f7b0 2534 }
9c2face6
JS
2535 if (irsp->ulpBdeCount != 0 &&
2536 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
2537 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
2538 int found = 0;
2539
2540 /* search continue save q for same XRI */
2541 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
7851fe2c
JS
2542 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
2543 saveq->iocb.unsli3.rcvsli3.ox_id) {
9c2face6
JS
2544 list_add_tail(&saveq->list, &iocbq->list);
2545 found = 1;
2546 break;
2547 }
2548 }
2549 if (!found)
2550 list_add_tail(&saveq->clist,
2551 &pring->iocb_continue_saveq);
2552 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
2553 list_del_init(&iocbq->clist);
2554 saveq = iocbq;
2555 irsp = &(saveq->iocb);
2556 } else
2557 return 0;
2558 }
2559 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2560 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2561 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
6a9c52cf
JS
2562 Rctl = FC_RCTL_ELS_REQ;
2563 Type = FC_TYPE_ELS;
9c2face6
JS
2564 } else {
2565 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2566 Rctl = w5p->hcsw.Rctl;
2567 Type = w5p->hcsw.Type;
2568
2569 /* Firmware Workaround */
2570 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2571 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2572 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
6a9c52cf
JS
2573 Rctl = FC_RCTL_ELS_REQ;
2574 Type = FC_TYPE_ELS;
9c2face6
JS
2575 w5p->hcsw.Rctl = Rctl;
2576 w5p->hcsw.Type = Type;
2577 }
2578 }
92d7f7b0 2579
3772a991 2580 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
92d7f7b0 2581 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011 2582 "0313 Ring %d handler: unexpected Rctl x%x "
92d7f7b0 2583 "Type x%x received\n",
e8b62011 2584 pring->ringno, Rctl, Type);
3772a991 2585
92d7f7b0 2586 return 1;
dea3101e 2587}
2588
e59058c4 2589/**
3621a710 2590 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
e59058c4
JS
2591 * @phba: Pointer to HBA context object.
2592 * @pring: Pointer to driver SLI ring object.
2593 * @prspiocb: Pointer to response iocb object.
2594 *
2595 * This function looks up the iocb_lookup table to get the command iocb
2596 * corresponding to the given response iocb using the iotag of the
2597 * response iocb. This function is called with the hbalock held.
2598 * This function returns the command iocb object if it finds the command
2599 * iocb else returns NULL.
2600 **/
dea3101e 2601static struct lpfc_iocbq *
2e0fef85
JS
2602lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2603 struct lpfc_sli_ring *pring,
2604 struct lpfc_iocbq *prspiocb)
dea3101e 2605{
dea3101e 2606 struct lpfc_iocbq *cmd_iocb = NULL;
2607 uint16_t iotag;
2608
604a3e30
JB
2609 iotag = prspiocb->iocb.ulpIoTag;
2610
2611 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2612 cmd_iocb = phba->sli.iocbq_lookup[iotag];
92d7f7b0 2613 list_del_init(&cmd_iocb->list);
4f2e66c6 2614 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2a9bf3d0 2615 pring->txcmplq_cnt--;
4f2e66c6 2616 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2a9bf3d0 2617 }
604a3e30 2618 return cmd_iocb;
dea3101e 2619 }
2620
dea3101e 2621 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 2622 "0317 iotag x%x is out off "
604a3e30 2623 "range: max iotag x%x wd0 x%x\n",
e8b62011 2624 iotag, phba->sli.last_iotag,
604a3e30 2625 *(((uint32_t *) &prspiocb->iocb) + 7));
dea3101e 2626 return NULL;
2627}
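/*
 * Note: the lookup is O(1). A response's ulpIoTag indexes directly
 * into phba->sli.iocbq_lookup[], the array grown on demand by
 * lpfc_sli_next_iotag() above, so no list walk is needed on the
 * completion path.
 */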
2628
3772a991
JS
2629/**
2630 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
2631 * @phba: Pointer to HBA context object.
2632 * @pring: Pointer to driver SLI ring object.
2633 * @iotag: IOCB tag.
2634 *
2635 * This function looks up the iocb_lookup table to get the command iocb
2636 * corresponding to the given iotag. This function is called with the
2637 * hbalock held.
2638 * This function returns the command iocb object if it finds the command
2639 * iocb else returns NULL.
2640 **/
2641static struct lpfc_iocbq *
2642lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2643 struct lpfc_sli_ring *pring, uint16_t iotag)
2644{
2645 struct lpfc_iocbq *cmd_iocb;
2646
2647 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2648 cmd_iocb = phba->sli.iocbq_lookup[iotag];
4f2e66c6
JS
2649 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2650 /* remove from txcmpl queue list */
2651 list_del_init(&cmd_iocb->list);
2652 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2a9bf3d0 2653 pring->txcmplq_cnt--;
4f2e66c6 2654 return cmd_iocb;
2a9bf3d0 2655 }
3772a991 2656 }
3772a991
JS
2657 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2658 "0372 iotag x%x is out off range: max iotag (x%x)\n",
2659 iotag, phba->sli.last_iotag);
2660 return NULL;
2661}
2662
e59058c4 2663/**
3621a710 2664 * lpfc_sli_process_sol_iocb - process solicited iocb completion
e59058c4
JS
2665 * @phba: Pointer to HBA context object.
2666 * @pring: Pointer to driver SLI ring object.
2667 * @saveq: Pointer to the response iocb to be processed.
2668 *
2669 * This function is called by the ring event handler for non-fcp
2670 * rings when there is a new response iocb in the response ring.
2671 * The caller is not required to hold any locks. This function
2672 * gets the command iocb associated with the response iocb and
2673 * calls the completion handler for the command iocb. If there
2674 * is no completion handler, the function will free the resources
2675 * associated with command iocb. If the response iocb is for
2676 * an already aborted command iocb, the status of the completion
2677 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
2678 * This function always returns 1.
2679 **/
dea3101e 2680static int
2e0fef85 2681lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
dea3101e 2682 struct lpfc_iocbq *saveq)
2683{
2e0fef85 2684 struct lpfc_iocbq *cmdiocbp;
dea3101e 2685 int rc = 1;
2686 unsigned long iflag;
2687
2688 /* Based on the iotag field, get the cmd IOCB from the txcmplq */
2e0fef85 2689 spin_lock_irqsave(&phba->hbalock, iflag);
604a3e30 2690 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
2e0fef85
JS
2691 spin_unlock_irqrestore(&phba->hbalock, iflag);
2692
dea3101e 2693 if (cmdiocbp) {
2694 if (cmdiocbp->iocb_cmpl) {
ea2151b4
JS
2695 /*
2696 * If an ELS command failed send an event to mgmt
2697 * application.
2698 */
2699 if (saveq->iocb.ulpStatus &&
2700 (pring->ringno == LPFC_ELS_RING) &&
2701 (cmdiocbp->iocb.ulpCommand ==
2702 CMD_ELS_REQUEST64_CR))
2703 lpfc_send_els_failure_event(phba,
2704 cmdiocbp, saveq);
2705
dea3101e 2706 /*
2707 * Post all ELS completions to the worker thread.
2708 * All other are passed to the completion callback.
2709 */
2710 if (pring->ringno == LPFC_ELS_RING) {
341af102
JS
2711 if ((phba->sli_rev < LPFC_SLI_REV4) &&
2712 (cmdiocbp->iocb_flag &
2713 LPFC_DRIVER_ABORTED)) {
2714 spin_lock_irqsave(&phba->hbalock,
2715 iflag);
07951076
JS
2716 cmdiocbp->iocb_flag &=
2717 ~LPFC_DRIVER_ABORTED;
341af102
JS
2718 spin_unlock_irqrestore(&phba->hbalock,
2719 iflag);
07951076
JS
2720 saveq->iocb.ulpStatus =
2721 IOSTAT_LOCAL_REJECT;
2722 saveq->iocb.un.ulpWord[4] =
2723 IOERR_SLI_ABORTED;
0ff10d46
JS
2724
2725 /* Firmware could still be in progress
2726 * of DMAing payload, so don't free data
2727 * buffer till after a hbeat.
2728 */
341af102
JS
2729 spin_lock_irqsave(&phba->hbalock,
2730 iflag);
0ff10d46 2731 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
341af102
JS
2732 spin_unlock_irqrestore(&phba->hbalock,
2733 iflag);
2734 }
0f65ff68
JS
2735 if (phba->sli_rev == LPFC_SLI_REV4) {
2736 if (saveq->iocb_flag &
2737 LPFC_EXCHANGE_BUSY) {
2738 /* Set cmdiocb flag for the
2739 * exchange busy so sgl (xri)
2740 * will not be released until
2741 * the abort xri is received
2742 * from hba.
2743 */
2744 spin_lock_irqsave(
2745 &phba->hbalock, iflag);
2746 cmdiocbp->iocb_flag |=
2747 LPFC_EXCHANGE_BUSY;
2748 spin_unlock_irqrestore(
2749 &phba->hbalock, iflag);
2750 }
2751 if (cmdiocbp->iocb_flag &
2752 LPFC_DRIVER_ABORTED) {
2753 /*
2754 * Clear LPFC_DRIVER_ABORTED
2755 * bit in case it was driver
2756 * initiated abort.
2757 */
2758 spin_lock_irqsave(
2759 &phba->hbalock, iflag);
2760 cmdiocbp->iocb_flag &=
2761 ~LPFC_DRIVER_ABORTED;
2762 spin_unlock_irqrestore(
2763 &phba->hbalock, iflag);
2764 cmdiocbp->iocb.ulpStatus =
2765 IOSTAT_LOCAL_REJECT;
2766 cmdiocbp->iocb.un.ulpWord[4] =
2767 IOERR_ABORT_REQUESTED;
2768 /*
2769 * For SLI4, irsiocb contains
2770 * NO_XRI in sli_xritag, it
2771 * shall not affect releasing
2772 * sgl (xri) process.
2773 */
2774 saveq->iocb.ulpStatus =
2775 IOSTAT_LOCAL_REJECT;
2776 saveq->iocb.un.ulpWord[4] =
2777 IOERR_SLI_ABORTED;
2778 spin_lock_irqsave(
2779 &phba->hbalock, iflag);
2780 saveq->iocb_flag |=
2781 LPFC_DELAY_MEM_FREE;
2782 spin_unlock_irqrestore(
2783 &phba->hbalock, iflag);
2784 }
07951076 2785 }
dea3101e 2786 }
2e0fef85 2787 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
604a3e30
JB
2788 } else
2789 lpfc_sli_release_iocbq(phba, cmdiocbp);
dea3101e 2790 } else {
2791 /*
2792 * Unknown initiating command based on the response iotag.
2793 * This could be the case on the ELS ring because of
2794 * lpfc_els_abort().
2795 */
2796 if (pring->ringno != LPFC_ELS_RING) {
2797 /*
2798 * Ring <ringno> handler: unexpected completion IoTag
2799 * <IoTag>
2800 */
a257bf90 2801 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011
JS
2802 "0322 Ring %d handler: "
2803 "unexpected completion IoTag x%x "
2804 "Data: x%x x%x x%x x%x\n",
2805 pring->ringno,
2806 saveq->iocb.ulpIoTag,
2807 saveq->iocb.ulpStatus,
2808 saveq->iocb.un.ulpWord[4],
2809 saveq->iocb.ulpCommand,
2810 saveq->iocb.ulpContext);
dea3101e 2811 }
2812 }
68876920 2813
dea3101e 2814 return rc;
2815}
2816
e59058c4 2817/**
3621a710 2818 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
e59058c4
JS
2819 * @phba: Pointer to HBA context object.
2820 * @pring: Pointer to driver SLI ring object.
2821 *
2822 * This function is called from the iocb ring event handlers when the
2823 * put pointer is ahead of the get pointer for a ring. This function signals
2824 * an error attention condition to the worker thread, and the worker
2825 * thread will transition the HBA to the offline state.
2826 **/
2e0fef85
JS
2827static void
2828lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
875fbdfe 2829{
34b02dcd 2830 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
875fbdfe 2831 /*
025dfdaf 2832 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
875fbdfe
JSEC
2833 * rsp ring <portRspMax>
2834 */
2835 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 2836 "0312 Ring %d handler: portRspPut %d "
025dfdaf 2837 "is bigger than rsp ring %d\n",
e8b62011 2838 pring->ringno, le32_to_cpu(pgp->rspPutInx),
7e56aa25 2839 pring->sli.sli3.numRiocb);
875fbdfe 2840
2e0fef85 2841 phba->link_state = LPFC_HBA_ERROR;
875fbdfe
JSEC
2842
2843 /*
2844 * All error attention handlers are posted to
2845 * worker thread
2846 */
2847 phba->work_ha |= HA_ERATT;
2848 phba->work_hs = HS_FFER3;
92d7f7b0 2849
5e9d9b82 2850 lpfc_worker_wake_up(phba);
875fbdfe
JSEC
2851
2852 return;
2853}
2854
9399627f 2855/**
3621a710 2856 * lpfc_poll_eratt - Error attention polling timer timeout handler
9399627f
JS
2857 * @ptr: Pointer to address of HBA context object.
2858 *
2859 * This function is invoked by the Error Attention polling timer when the
2860 * timer times out. It will check the SLI Error Attention register for
2861 * possible attention events. If so, it will post an Error Attention event
2862 * and wake up worker thread to process it. Otherwise, it will set up the
2863 * Error Attention polling timer for the next poll.
2864 **/
2865void lpfc_poll_eratt(unsigned long ptr)
2866{
2867 struct lpfc_hba *phba;
aa6fbb75
JS
2868 uint32_t eratt = 0, rem;
2869 uint64_t sli_intr, cnt;
9399627f
JS
2870
2871 phba = (struct lpfc_hba *)ptr;
2872
aa6fbb75
JS
2873 /* Here we also keep track of the hba's interrupts per second */
2874 sli_intr = phba->sli.slistat.sli_intr;
2875
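 /* The 64-bit interrupt counter may have wrapped since the last
  * poll; compute the delta accordingly.
  */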
2876 if (phba->sli.slistat.sli_prev_intr > sli_intr)
2877 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
2878 sli_intr);
2879 else
2880 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
2881
2882 /* 64-bit integer division not supported on 32-bit x86 - use do_div */
2883 rem = do_div(cnt, LPFC_ERATT_POLL_INTERVAL);
2884 phba->sli.slistat.sli_ips = cnt;
2885
2886 phba->sli.slistat.sli_prev_intr = sli_intr;
2887
9399627f
JS
2888 /* Check chip HA register for error event */
2889 eratt = lpfc_sli_check_eratt(phba);
2890
2891 if (eratt)
2892 /* Tell the worker thread there is work to do */
2893 lpfc_worker_wake_up(phba);
2894 else
2895 /* Restart the timer for next eratt poll */
2896 mod_timer(&phba->eratt_poll, jiffies +
2897 HZ * LPFC_ERATT_POLL_INTERVAL);
2898 return;
2899}
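/*
 * A minimal sketch of how this poll timer is (re)armed at attach time
 * (hypothetical placement; the actual setup lives elsewhere in the
 * driver):
 *
 *	setup_timer(&phba->eratt_poll, lpfc_poll_eratt,
 *		    (unsigned long)phba);
 *	mod_timer(&phba->eratt_poll,
 *		  jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
 */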
2900
875fbdfe 2901
e59058c4 2902/**
3621a710 2903 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
e59058c4
JS
2904 * @phba: Pointer to HBA context object.
2905 * @pring: Pointer to driver SLI ring object.
2906 * @mask: Host attention register mask for this ring.
2907 *
2908 * This function is called from the interrupt context when there is a ring
2909 * event for the fcp ring. The caller does not hold any lock.
2910 * The function processes each response iocb in the response ring until it
25985edc 2911 * finds an iocb with the LE bit set, chaining all the iocbs up to and
e59058c4
JS
2912 * including that iocb. The function will call the completion handler of the
2913 * command iocb if the response iocb indicates a completion for a command iocb
2914 * or is an abort completion. The function will call lpfc_sli_process_unsol_iocb
2915 * function if this is an unsolicited iocb.
dea3101e 2916 * This routine presumes LPFC_FCP_RING handling and doesn't bother
45ed1190
JS
2917 * to check it explicitly.
2918 */
2919int
2e0fef85
JS
2920lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2921 struct lpfc_sli_ring *pring, uint32_t mask)
dea3101e 2922{
34b02dcd 2923 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
dea3101e 2924 IOCB_t *irsp = NULL;
87f6eaff 2925 IOCB_t *entry = NULL;
dea3101e 2926 struct lpfc_iocbq *cmdiocbq = NULL;
2927 struct lpfc_iocbq rspiocbq;
dea3101e 2928 uint32_t status;
2929 uint32_t portRspPut, portRspMax;
2930 int rc = 1;
2931 lpfc_iocb_type type;
2932 unsigned long iflag;
2933 uint32_t rsp_cmpl = 0;
dea3101e 2934
2e0fef85 2935 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 2936 pring->stats.iocb_event++;
2937
dea3101e 2938 /*
2939 * The next available response entry should never exceed the maximum
2940 * entries. If it does, treat it as an adapter hardware error.
2941 */
7e56aa25 2942 portRspMax = pring->sli.sli3.numRiocb;
dea3101e 2943 portRspPut = le32_to_cpu(pgp->rspPutInx);
2944 if (unlikely(portRspPut >= portRspMax)) {
875fbdfe 2945 lpfc_sli_rsp_pointers_error(phba, pring);
2e0fef85 2946 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 2947 return 1;
2948 }
45ed1190
JS
2949 if (phba->fcp_ring_in_use) {
2950 spin_unlock_irqrestore(&phba->hbalock, iflag);
2951 return 1;
2952 } else
2953 phba->fcp_ring_in_use = 1;
dea3101e 2954
2955 rmb();
7e56aa25 2956 while (pring->sli.sli3.rspidx != portRspPut) {
87f6eaff
JSEC
2957 /*
2958 * Fetch an entry off the ring and copy it into a local data
2959 * structure. The copy involves a byte-swap since the
2960 * network byte order and pci byte orders are different.
2961 */
ed957684 2962 entry = lpfc_resp_iocb(phba, pring);
858c9f6c 2963 phba->last_completion_time = jiffies;
875fbdfe 2964
7e56aa25
JS
2965 if (++pring->sli.sli3.rspidx >= portRspMax)
2966 pring->sli.sli3.rspidx = 0;
875fbdfe 2967
87f6eaff
JSEC
2968 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
2969 (uint32_t *) &rspiocbq.iocb,
ed957684 2970 phba->iocb_rsp_size);
a4bc3379 2971 INIT_LIST_HEAD(&(rspiocbq.list));
87f6eaff
JSEC
2972 irsp = &rspiocbq.iocb;
2973
dea3101e 2974 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
2975 pring->stats.iocb_rsp++;
2976 rsp_cmpl++;
2977
2978 if (unlikely(irsp->ulpStatus)) {
92d7f7b0
JS
2979 /*
2980 * If resource errors reported from HBA, reduce
2981 * queuedepths of the SCSI device.
2982 */
2983 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
e3d2b802
JS
2984 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
2985 IOERR_NO_RESOURCES)) {
92d7f7b0 2986 spin_unlock_irqrestore(&phba->hbalock, iflag);
3772a991 2987 phba->lpfc_rampdown_queue_depth(phba);
92d7f7b0
JS
2988 spin_lock_irqsave(&phba->hbalock, iflag);
2989 }
2990
dea3101e 2991 /* Rsp ring <ringno> error: IOCB */
2992 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011 2993 "0336 Rsp Ring %d error: IOCB Data: "
92d7f7b0 2994 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
e8b62011 2995 pring->ringno,
92d7f7b0
JS
2996 irsp->un.ulpWord[0],
2997 irsp->un.ulpWord[1],
2998 irsp->un.ulpWord[2],
2999 irsp->un.ulpWord[3],
3000 irsp->un.ulpWord[4],
3001 irsp->un.ulpWord[5],
d7c255b2
JS
3002 *(uint32_t *)&irsp->un1,
3003 *((uint32_t *)&irsp->un1 + 1));
dea3101e 3004 }
3005
3006 switch (type) {
3007 case LPFC_ABORT_IOCB:
3008 case LPFC_SOL_IOCB:
3009 /*
3010 * Idle exchange closed via ABTS from port. No iocb
3011 * resources need to be recovered.
3012 */
3013 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
dca9479b 3014 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 3015 "0333 IOCB cmd 0x%x"
dca9479b 3016 " processed. Skipping"
92d7f7b0 3017 " completion\n",
dca9479b 3018 irsp->ulpCommand);
dea3101e 3019 break;
3020 }
3021
604a3e30
JB
3022 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3023 &rspiocbq);
0f65ff68
JS
3024 if (unlikely(!cmdiocbq))
3025 break;
3026 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3027 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3028 if (cmdiocbq->iocb_cmpl) {
3029 spin_unlock_irqrestore(&phba->hbalock, iflag);
3030 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3031 &rspiocbq);
3032 spin_lock_irqsave(&phba->hbalock, iflag);
3033 }
dea3101e 3034 break;
a4bc3379 3035 case LPFC_UNSOL_IOCB:
2e0fef85 3036 spin_unlock_irqrestore(&phba->hbalock, iflag);
a4bc3379 3037 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
2e0fef85 3038 spin_lock_irqsave(&phba->hbalock, iflag);
a4bc3379 3039 break;
dea3101e 3040 default:
3041 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3042 char adaptermsg[LPFC_MAX_ADPTMSG];
3043 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3044 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3045 MAX_MSG_DATA);
898eb71c
JP
3046 dev_warn(&((phba->pcidev)->dev),
3047 "lpfc%d: %s\n",
dea3101e 3048 phba->brd_no, adaptermsg);
3049 } else {
3050 /* Unknown IOCB command */
3051 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 3052 "0334 Unknown IOCB command "
92d7f7b0 3053 "Data: x%x, x%x x%x x%x x%x\n",
e8b62011 3054 type, irsp->ulpCommand,
92d7f7b0
JS
3055 irsp->ulpStatus,
3056 irsp->ulpIoTag,
3057 irsp->ulpContext);
dea3101e 3058 }
3059 break;
3060 }
3061
3062 /*
3063 * The response IOCB has been processed. Update the ring
3064 * pointer in SLIM. If the port response put pointer has not
3065 * been updated, sync the pgp->rspPutInx and fetch the new port
3066 * response put pointer.
3067 */
7e56aa25
JS
3068 writel(pring->sli.sli3.rspidx,
3069 &phba->host_gp[pring->ringno].rspGetInx);
dea3101e 3070
7e56aa25 3071 if (pring->sli.sli3.rspidx == portRspPut)
dea3101e 3072 portRspPut = le32_to_cpu(pgp->rspPutInx);
3073 }
3074
3075 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3076 pring->stats.iocb_rsp_full++;
3077 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3078 writel(status, phba->CAregaddr);
3079 readl(phba->CAregaddr);
3080 }
3081 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3082 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3083 pring->stats.iocb_cmd_empty++;
3084
3085 /* Force update of the local copy of cmdGetInx */
7e56aa25 3086 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
dea3101e 3087 lpfc_sli_resume_iocb(phba, pring);
3088
3089 if ((pring->lpfc_sli_cmd_available))
3090 (pring->lpfc_sli_cmd_available) (phba, pring);
3091
3092 }
3093
45ed1190 3094 phba->fcp_ring_in_use = 0;
2e0fef85 3095 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 3096 return rc;
3097}
3098
e59058c4 3099/**
3772a991
JS
3100 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3101 * @phba: Pointer to HBA context object.
3102 * @pring: Pointer to driver SLI ring object.
3103 * @rspiocbp: Pointer to driver response IOCB object.
3104 *
3105 * This function is called from the worker thread when there is a slow-path
3106 * response IOCB to process. This function chains all the response iocbs until
3107 * seeing the iocb with the LE bit set. The function will call
3108 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3109 * completion of a command iocb. The function will call the
3110 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3111 * The function frees the resources or calls the completion handler if this
3112 * iocb is an abort completion. The function returns NULL when the response
3113 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3114 * this function shall chain the iocb on to the iocb_continueq and return the
3115 * response iocb passed in.
3116 **/
3117static struct lpfc_iocbq *
3118lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3119 struct lpfc_iocbq *rspiocbp)
3120{
3121 struct lpfc_iocbq *saveq;
3122 struct lpfc_iocbq *cmdiocbp;
3123 struct lpfc_iocbq *next_iocb;
3124 IOCB_t *irsp = NULL;
3125 uint32_t free_saveq;
3126 uint8_t iocb_cmd_type;
3127 lpfc_iocb_type type;
3128 unsigned long iflag;
3129 int rc;
3130
3131 spin_lock_irqsave(&phba->hbalock, iflag);
3132 /* First add the response iocb to the continueq list */
3133 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3134 pring->iocb_continueq_cnt++;
3135
70f23fd6 3136 /* Now, determine whether the list is completed for processing */
3772a991
JS
3137 irsp = &rspiocbp->iocb;
3138 if (irsp->ulpLe) {
3139 /*
3140 * By default, the driver expects to free all resources
3141 * associated with this iocb completion.
3142 */
3143 free_saveq = 1;
3144 saveq = list_get_first(&pring->iocb_continueq,
3145 struct lpfc_iocbq, list);
3146 irsp = &(saveq->iocb);
3147 list_del_init(&pring->iocb_continueq);
3148 pring->iocb_continueq_cnt = 0;
3149
3150 pring->stats.iocb_rsp++;
3151
3152 /*
3153 * If resource errors reported from HBA, reduce
3154 * queuedepths of the SCSI device.
3155 */
3156 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
e3d2b802
JS
3157 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3158 IOERR_NO_RESOURCES)) {
3772a991
JS
3159 spin_unlock_irqrestore(&phba->hbalock, iflag);
3160 phba->lpfc_rampdown_queue_depth(phba);
3161 spin_lock_irqsave(&phba->hbalock, iflag);
3162 }
3163
3164 if (irsp->ulpStatus) {
3165 /* Rsp ring <ringno> error: IOCB */
3166 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3167 "0328 Rsp Ring %d error: "
3168 "IOCB Data: "
3169 "x%x x%x x%x x%x "
3170 "x%x x%x x%x x%x "
3171 "x%x x%x x%x x%x "
3172 "x%x x%x x%x x%x\n",
3173 pring->ringno,
3174 irsp->un.ulpWord[0],
3175 irsp->un.ulpWord[1],
3176 irsp->un.ulpWord[2],
3177 irsp->un.ulpWord[3],
3178 irsp->un.ulpWord[4],
3179 irsp->un.ulpWord[5],
3180 *(((uint32_t *) irsp) + 6),
3181 *(((uint32_t *) irsp) + 7),
3182 *(((uint32_t *) irsp) + 8),
3183 *(((uint32_t *) irsp) + 9),
3184 *(((uint32_t *) irsp) + 10),
3185 *(((uint32_t *) irsp) + 11),
3186 *(((uint32_t *) irsp) + 12),
3187 *(((uint32_t *) irsp) + 13),
3188 *(((uint32_t *) irsp) + 14),
3189 *(((uint32_t *) irsp) + 15));
3190 }
3191
3192 /*
3193 * Fetch the IOCB command type and call the correct completion
3194 * routine. Solicited and Unsolicited IOCBs on the ELS ring
3195 * get freed back to the lpfc_iocb_list by the discovery
3196 * kernel thread.
3197 */
3198 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3199 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3200 switch (type) {
3201 case LPFC_SOL_IOCB:
3202 spin_unlock_irqrestore(&phba->hbalock, iflag);
3203 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3204 spin_lock_irqsave(&phba->hbalock, iflag);
3205 break;
3206
3207 case LPFC_UNSOL_IOCB:
3208 spin_unlock_irqrestore(&phba->hbalock, iflag);
3209 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3210 spin_lock_irqsave(&phba->hbalock, iflag);
3211 if (!rc)
3212 free_saveq = 0;
3213 break;
3214
3215 case LPFC_ABORT_IOCB:
3216 cmdiocbp = NULL;
3217 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
3218 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3219 saveq);
3220 if (cmdiocbp) {
3221 /* Call the specified completion routine */
3222 if (cmdiocbp->iocb_cmpl) {
3223 spin_unlock_irqrestore(&phba->hbalock,
3224 iflag);
3225 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3226 saveq);
3227 spin_lock_irqsave(&phba->hbalock,
3228 iflag);
3229 } else
3230 __lpfc_sli_release_iocbq(phba,
3231 cmdiocbp);
3232 }
3233 break;
3234
3235 case LPFC_UNKNOWN_IOCB:
3236 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3237 char adaptermsg[LPFC_MAX_ADPTMSG];
3238 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3239 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3240 MAX_MSG_DATA);
3241 dev_warn(&((phba->pcidev)->dev),
3242 "lpfc%d: %s\n",
3243 phba->brd_no, adaptermsg);
3244 } else {
3245 /* Unknown IOCB command */
3246 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3247 "0335 Unknown IOCB "
3248 "command Data: x%x "
3249 "x%x x%x x%x\n",
3250 irsp->ulpCommand,
3251 irsp->ulpStatus,
3252 irsp->ulpIoTag,
3253 irsp->ulpContext);
3254 }
3255 break;
3256 }
3257
3258 if (free_saveq) {
3259 list_for_each_entry_safe(rspiocbp, next_iocb,
3260 &saveq->list, list) {
3261 list_del(&rspiocbp->list);
3262 __lpfc_sli_release_iocbq(phba, rspiocbp);
3263 }
3264 __lpfc_sli_release_iocbq(phba, saveq);
3265 }
3266 rspiocbp = NULL;
3267 }
3268 spin_unlock_irqrestore(&phba->hbalock, iflag);
3269 return rspiocbp;
3270}
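/*
 * Caller-side sketch (hypothetical; the real loop lives in
 * lpfc_sli_handle_slow_ring_event_s3 below): each response entry is
 * fed to lpfc_sli_sp_handle_rspiocb(). A NULL return means an
 * LE-terminated chain was consumed; a non-NULL return means the iocb
 * was parked on iocb_continueq to wait for further entries.
 */
static void lpfc_example_feed_rspiocb(struct lpfc_hba *phba,
				      struct lpfc_sli_ring *pring,
				      struct lpfc_iocbq *rsp)
{
	if (lpfc_sli_sp_handle_rspiocb(phba, pring, rsp) == NULL)
		pr_debug("lpfc example: LE-terminated chain processed\n");
}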
3271
3272/**
3273 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
e59058c4
JS
3274 * @phba: Pointer to HBA context object.
3275 * @pring: Pointer to driver SLI ring object.
3276 * @mask: Host attention register mask for this ring.
3277 *
3772a991
JS
3278 * This routine wraps the actual slow_ring event process routine from the
3279 * API jump table function pointer from the lpfc_hba struct.
e59058c4 3280 **/
3772a991 3281void
2e0fef85
JS
3282lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3283 struct lpfc_sli_ring *pring, uint32_t mask)
3772a991
JS
3284{
3285 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3286}
3287
3288/**
3289 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3290 * @phba: Pointer to HBA context object.
3291 * @pring: Pointer to driver SLI ring object.
3292 * @mask: Host attention register mask for this ring.
3293 *
3294 * This function is called from the worker thread when there is a ring event
3295 * for non-fcp rings. The caller does not hold any lock. The function will
3296 * remove each response iocb in the response ring and call the handle
3297 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3298 **/
3299static void
3300lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3301 struct lpfc_sli_ring *pring, uint32_t mask)
dea3101e 3302{
34b02dcd 3303 struct lpfc_pgp *pgp;
dea3101e 3304 IOCB_t *entry;
3305 IOCB_t *irsp = NULL;
3306 struct lpfc_iocbq *rspiocbp = NULL;
dea3101e 3307 uint32_t portRspPut, portRspMax;
dea3101e 3308 unsigned long iflag;
3772a991 3309 uint32_t status;
dea3101e 3310
34b02dcd 3311 pgp = &phba->port_gp[pring->ringno];
2e0fef85 3312 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 3313 pring->stats.iocb_event++;
3314
dea3101e 3315 /*
3316 * The next available response entry should never exceed the maximum
3317 * entries. If it does, treat it as an adapter hardware error.
3318 */
7e56aa25 3319 portRspMax = pring->sli.sli3.numRiocb;
dea3101e 3320 portRspPut = le32_to_cpu(pgp->rspPutInx);
3321 if (portRspPut >= portRspMax) {
3322 /*
025dfdaf 3323 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
dea3101e 3324 * rsp ring <portRspMax>
3325 */
ed957684 3326 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 3327 "0303 Ring %d handler: portRspPut %d "
025dfdaf 3328 "is bigger than rsp ring %d\n",
e8b62011 3329 pring->ringno, portRspPut, portRspMax);
dea3101e 3330
2e0fef85
JS
3331 phba->link_state = LPFC_HBA_ERROR;
3332 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 3333
3334 phba->work_hs = HS_FFER3;
3335 lpfc_handle_eratt(phba);
3336
3772a991 3337 return;
dea3101e 3338 }
3339
3340 rmb();
7e56aa25 3341 while (pring->sli.sli3.rspidx != portRspPut) {
dea3101e 3342 /*
3343 * Build a completion list and call the appropriate handler.
3344 * The process is to get the next available response iocb, get
3345 * a free iocb from the list, copy the response data into the
3346 * free iocb, insert to the continuation list, and update the
3347 * next response index to slim. This process makes response
3348 * iocbs in the ring available to DMA as fast as possible but
3349 * pays a penalty for a copy operation. Since the iocb is
3350 * only 32 bytes, this penalty is considered small relative to
3351 * the PCI reads for register values and a slim write. When
3352 * the ulpLe field is set, the entire Command has been
3353 * received.
3354 */
ed957684
JS
3355 entry = lpfc_resp_iocb(phba, pring);
3356
858c9f6c 3357 phba->last_completion_time = jiffies;
2e0fef85 3358 rspiocbp = __lpfc_sli_get_iocbq(phba);
dea3101e 3359 if (rspiocbp == NULL) {
3360 printk(KERN_ERR "%s: out of buffers! Failing "
cadbd4a5 3361 "completion.\n", __func__);
dea3101e 3362 break;
3363 }
3364
ed957684
JS
3365 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3366 phba->iocb_rsp_size);
dea3101e 3367 irsp = &rspiocbp->iocb;
3368
7e56aa25
JS
3369 if (++pring->sli.sli3.rspidx >= portRspMax)
3370 pring->sli.sli3.rspidx = 0;
dea3101e 3371
a58cbd52
JS
3372 if (pring->ringno == LPFC_ELS_RING) {
3373 lpfc_debugfs_slow_ring_trc(phba,
3374 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
3375 *(((uint32_t *) irsp) + 4),
3376 *(((uint32_t *) irsp) + 6),
3377 *(((uint32_t *) irsp) + 7));
3378 }
3379
7e56aa25
JS
3380 writel(pring->sli.sli3.rspidx,
3381 &phba->host_gp[pring->ringno].rspGetInx);
dea3101e 3382
3772a991
JS
3383 spin_unlock_irqrestore(&phba->hbalock, iflag);
3384 /* Handle the response IOCB */
3385 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3386 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 3387
3388 /*
3389 * If the port response put pointer has not been updated, sync
3390 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
3391 * response put pointer.
3392 */
7e56aa25 3393 if (pring->sli.sli3.rspidx == portRspPut) {
dea3101e 3394 portRspPut = le32_to_cpu(pgp->rspPutInx);
3395 }
7e56aa25 3396 } /* while (pring->sli.sli3.rspidx != portRspPut) */
dea3101e 3397
92d7f7b0 3398 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
dea3101e 3399 /* At least one response entry has been freed */
3400 pring->stats.iocb_rsp_full++;
3401 /* SET RxRE_RSP in Chip Att register */
3402 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3403 writel(status, phba->CAregaddr);
3404 readl(phba->CAregaddr); /* flush */
3405 }
3406 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3407 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3408 pring->stats.iocb_cmd_empty++;
3409
3410 /* Force update of the local copy of cmdGetInx */
7e56aa25 3411 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
dea3101e 3412 lpfc_sli_resume_iocb(phba, pring);
3413
3414 if ((pring->lpfc_sli_cmd_available))
3415 (pring->lpfc_sli_cmd_available) (phba, pring);
3416
3417 }
3418
2e0fef85 3419 spin_unlock_irqrestore(&phba->hbalock, iflag);
3772a991 3420 return;
dea3101e 3421}
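/*
 * Index-wrap sketch (hypothetical helper mirroring the rspidx
 * arithmetic in the loop above): ring indices advance modulo the
 * ring size, wrapping back to entry zero at portRspMax.
 */
static inline uint32_t lpfc_example_ring_next(uint32_t idx, uint32_t max)
{
	return (idx + 1 >= max) ? 0 : idx + 1;
}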
3422
4f774513
JS
3423/**
3424 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3425 * @phba: Pointer to HBA context object.
3426 * @pring: Pointer to driver SLI ring object.
3427 * @mask: Host attention register mask for this ring.
3428 *
3429 * This function is called from the worker thread when there is a pending
3430 * ELS response iocb on the driver internal slow-path response iocb worker
3431 * queue. The caller does not hold any lock. The function will remove each
3432 * response iocb from the response worker queue and calls the handle
3433 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3434 **/
3435static void
3436lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3437 struct lpfc_sli_ring *pring, uint32_t mask)
3438{
3439 struct lpfc_iocbq *irspiocbq;
4d9ab994
JS
3440 struct hbq_dmabuf *dmabuf;
3441 struct lpfc_cq_event *cq_event;
4f774513
JS
3442 unsigned long iflag;
3443
45ed1190
JS
3444 spin_lock_irqsave(&phba->hbalock, iflag);
3445 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
3446 spin_unlock_irqrestore(&phba->hbalock, iflag);
3447 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
4f774513
JS
3448 /* Get the response iocb from the head of work queue */
3449 spin_lock_irqsave(&phba->hbalock, iflag);
45ed1190 3450 list_remove_head(&phba->sli4_hba.sp_queue_event,
4d9ab994 3451 cq_event, struct lpfc_cq_event, list);
4f774513 3452 spin_unlock_irqrestore(&phba->hbalock, iflag);
4d9ab994
JS
3453
3454 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
3455 case CQE_CODE_COMPL_WQE:
3456 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
3457 cq_event);
45ed1190
JS
3458 /* Translate ELS WCQE to response IOCBQ */
3459 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
3460 irspiocbq);
3461 if (irspiocbq)
3462 lpfc_sli_sp_handle_rspiocb(phba, pring,
3463 irspiocbq);
4d9ab994
JS
3464 break;
3465 case CQE_CODE_RECEIVE:
7851fe2c 3466 case CQE_CODE_RECEIVE_V1:
4d9ab994
JS
3467 dmabuf = container_of(cq_event, struct hbq_dmabuf,
3468 cq_event);
3469 lpfc_sli4_handle_received_buffer(phba, dmabuf);
3470 break;
3471 default:
3472 break;
3473 }
4f774513
JS
3474 }
3475}
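/*
 * Drain-pattern sketch (hypothetical; mirrors the single-consumer
 * loop above): hold hbalock only while unlinking an event, then drop
 * it so per-event processing can re-take the lock safely.
 */
static void lpfc_example_drain_sp_events(struct lpfc_hba *phba,
					 struct list_head *q)
{
	struct lpfc_cq_event *evt;
	unsigned long iflag;

	while (!list_empty(q)) {
		spin_lock_irqsave(&phba->hbalock, iflag);
		list_remove_head(q, evt, struct lpfc_cq_event, list);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		/* process evt here without holding hbalock */
	}
}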
3476
e59058c4 3477/**
3621a710 3478 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
e59058c4
JS
3479 * @phba: Pointer to HBA context object.
3480 * @pring: Pointer to driver SLI ring object.
3481 *
3482 * This function aborts all iocbs in the given ring and frees all the iocb
3483 * objects in txq. This function issues an abort iocb for all the iocb commands
3484 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3485 * the return of this function. The caller is not required to hold any locks.
3486 **/
2e0fef85 3487void
dea3101e 3488lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3489{
2534ba75 3490 LIST_HEAD(completions);
dea3101e 3491 struct lpfc_iocbq *iocb, *next_iocb;
dea3101e 3492
92d7f7b0
JS
3493 if (pring->ringno == LPFC_ELS_RING) {
3494 lpfc_fabric_abort_hba(phba);
3495 }
3496
dea3101e 3497 /* Error everything on txq and txcmplq
3498 * First do the txq.
3499 */
2e0fef85 3500 spin_lock_irq(&phba->hbalock);
2534ba75 3501 list_splice_init(&pring->txq, &completions);
dea3101e 3502 pring->txq_cnt = 0;
dea3101e 3503
3504 /* Next issue ABTS for everything on the txcmplq */
2534ba75
JS
3505 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3506 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
dea3101e 3507
2e0fef85 3508 spin_unlock_irq(&phba->hbalock);
dea3101e 3509
a257bf90
JS
3510 /* Cancel all the IOCBs from the completions list */
3511 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3512 IOERR_SLI_ABORTED);
dea3101e 3513}
3514
a8e497d5 3515/**
3621a710 3516 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
a8e497d5
JS
3517 * @phba: Pointer to HBA context object.
3518 *
3519 * This function flushes all iocbs in the fcp ring and frees all the iocb
3520 * objects in txq and txcmplq. This function will not issue abort iocbs
3521 * for all the iocb commands in txcmplq, they will just be returned with
3522 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
3523 * slot has been permanently disabled.
3524 **/
3525void
3526lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
3527{
3528 LIST_HEAD(txq);
3529 LIST_HEAD(txcmplq);
a8e497d5
JS
3530 struct lpfc_sli *psli = &phba->sli;
3531 struct lpfc_sli_ring *pring;
3532
3533 /* Currently, only one fcp ring */
3534 pring = &psli->ring[psli->fcp_ring];
3535
3536 spin_lock_irq(&phba->hbalock);
3537 /* Retrieve everything on txq */
3538 list_splice_init(&pring->txq, &txq);
3539 pring->txq_cnt = 0;
3540
3541 /* Retrieve everything on the txcmplq */
3542 list_splice_init(&pring->txcmplq, &txcmplq);
3543 pring->txcmplq_cnt = 0;
4f2e66c6
JS
3544
3545 /* Indicate the I/O queues are flushed */
3546 phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
a8e497d5
JS
3547 spin_unlock_irq(&phba->hbalock);
3548
3549 /* Flush the txq */
a257bf90
JS
3550 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
3551 IOERR_SLI_DOWN);
a8e497d5
JS
3552
3553 /* Flush the txcmpq */
a257bf90
JS
3554 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
3555 IOERR_SLI_DOWN);
a8e497d5
JS
3556}
3557
e59058c4 3558/**
3772a991 3559 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
e59058c4
JS
3560 * @phba: Pointer to HBA context object.
3561 * @mask: Bit mask to be checked.
3562 *
3563 * This function reads the host status register and compares
3564 * with the provided bit mask to check if HBA completed
3565 * the restart. This function will wait in a loop for the
3566 * HBA to complete restart. If the HBA does not restart within
3567 * 15 iterations, the function will reset the HBA again. The
3568 * function returns 1 when HBA fail to restart otherwise returns
3569 * zero.
3570 **/
3772a991
JS
3571static int
3572lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
dea3101e 3573{
41415862
JW
3574 uint32_t status;
3575 int i = 0;
3576 int retval = 0;
dea3101e 3577
41415862 3578 /* Read the HBA Host Status Register */
9940b97b
JS
3579 if (lpfc_readl(phba->HSregaddr, &status))
3580 return 1;
dea3101e 3581
41415862
JW
3582 /*
3583 * Check status register every 10ms for 5 retries, then every
3584 * 500ms for 5, then every 2.5 sec for 10, restarting the board
3585 * at the 15th iteration.
3586 * Break out of the loop if errors occurred during init.
3587 */
3588 while (((status & mask) != mask) &&
3589 !(status & HS_FFERM) &&
3590 i++ < 20) {
dea3101e 3591
41415862
JW
3592 if (i <= 5)
3593 msleep(10);
3594 else if (i <= 10)
3595 msleep(500);
3596 else
3597 msleep(2500);
dea3101e 3598
41415862 3599 if (i == 15) {
2e0fef85 3600 /* Do post */
92d7f7b0 3601 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
41415862
JW
3602 lpfc_sli_brdrestart(phba);
3603 }
3604 /* Read the HBA Host Status Register */
9940b97b
JS
3605 if (lpfc_readl(phba->HSregaddr, &status)) {
3606 retval = 1;
3607 break;
3608 }
41415862 3609 }
dea3101e 3610
41415862
JW
3611 /* Check to see if any errors occurred during init */
3612 if ((status & HS_FFERM) || (i >= 20)) {
e40a02c1
JS
3613 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3614 "2751 Adapter failed to restart, "
3615 "status reg x%x, FW Data: A8 x%x AC x%x\n",
3616 status,
3617 readl(phba->MBslimaddr + 0xa8),
3618 readl(phba->MBslimaddr + 0xac));
2e0fef85 3619 phba->link_state = LPFC_HBA_ERROR;
41415862 3620 retval = 1;
dea3101e 3621 }
dea3101e 3622
41415862
JW
3623 return retval;
3624}
dea3101e 3625
da0436e9
JS
3626/**
3627 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
3628 * @phba: Pointer to HBA context object.
3629 * @mask: Bit mask to be checked.
3630 *
3631 * This function checks the host status register to check if HBA is
3632 * ready. If the HBA is not ready, the function will reset the
3633 * HBA PCI function and check the status again. The function
3634 * returns 1 when the HBA fails to become ready, otherwise it
3635 * returns zero.
3636 **/
3637static int
3638lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
3639{
3640 uint32_t status;
3641 int retval = 0;
3642
3643 /* Read the HBA Host Status Register */
3644 status = lpfc_sli4_post_status_check(phba);
3645
3646 if (status) {
3647 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3648 lpfc_sli_brdrestart(phba);
3649 status = lpfc_sli4_post_status_check(phba);
3650 }
3651
3652 /* Check to see if any errors occurred during init */
3653 if (status) {
3654 phba->link_state = LPFC_HBA_ERROR;
3655 retval = 1;
3656 } else
3657 phba->sli4_hba.intr_enable = 0;
3658
3659 return retval;
3660}
3661
3662/**
3663 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
3664 * @phba: Pointer to HBA context object.
3665 * @mask: Bit mask to be checked.
3666 *
3667 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
3668 * from the API jump table function pointer from the lpfc_hba struct.
3669 **/
3670int
3671lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
3672{
3673 return phba->lpfc_sli_brdready(phba, mask);
3674}
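/*
 * Usage sketch (hypothetical): a restart path would wait for both the
 * firmware-ready and mailbox-ready bits, the same mask the chipset
 * init code below polls for.
 */
static int lpfc_example_wait_board_ready(struct lpfc_hba *phba)
{
	return lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY) ? -EIO : 0;
}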
3675
9290831f
JS
3676#define BARRIER_TEST_PATTERN (0xdeadbeef)
3677
e59058c4 3678/**
3621a710 3679 * lpfc_reset_barrier - Make HBA ready for HBA reset
e59058c4
JS
3680 * @phba: Pointer to HBA context object.
3681 *
1b51197d
JS
3682 * This function is called before resetting an HBA. It is called with
3683 * hbalock held and requests the HBA to quiesce DMAs before a reset.
e59058c4 3684 **/
2e0fef85 3685void lpfc_reset_barrier(struct lpfc_hba *phba)
9290831f 3686{
65a29c16
JS
3687 uint32_t __iomem *resp_buf;
3688 uint32_t __iomem *mbox_buf;
9290831f 3689 volatile uint32_t mbox;
9940b97b 3690 uint32_t hc_copy, ha_copy, resp_data;
9290831f
JS
3691 int i;
3692 uint8_t hdrtype;
3693
3694 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
3695 if (hdrtype != 0x80 ||
3696 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
3697 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
3698 return;
3699
3700 /*
3701 * Tell the other part of the chip to suspend temporarily all
3702 * its DMA activity.
3703 */
65a29c16 3704 resp_buf = phba->MBslimaddr;
9290831f
JS
3705
3706 /* Disable the error attention */
9940b97b
JS
3707 if (lpfc_readl(phba->HCregaddr, &hc_copy))
3708 return;
9290831f
JS
3709 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
3710 readl(phba->HCregaddr); /* flush */
2e0fef85 3711 phba->link_flag |= LS_IGNORE_ERATT;
9290831f 3712
9940b97b
JS
3713 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3714 return;
3715 if (ha_copy & HA_ERATT) {
9290831f
JS
3716 /* Clear Chip error bit */
3717 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 3718 phba->pport->stopped = 1;
9290831f
JS
3719 }
3720
3721 mbox = 0;
3722 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
3723 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
3724
3725 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
65a29c16 3726 mbox_buf = phba->MBslimaddr;
9290831f
JS
3727 writel(mbox, mbox_buf);
3728
9940b97b
JS
3729 for (i = 0; i < 50; i++) {
3730 if (lpfc_readl((resp_buf + 1), &resp_data))
3731 return;
3732 if (resp_data != ~(BARRIER_TEST_PATTERN))
3733 mdelay(1);
3734 else
3735 break;
3736 }
3737 resp_data = 0;
3738 if (lpfc_readl((resp_buf + 1), &resp_data))
3739 return;
3740 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
f4b4c68f 3741 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
2e0fef85 3742 phba->pport->stopped)
9290831f
JS
3743 goto restore_hc;
3744 else
3745 goto clear_errat;
3746 }
3747
3748 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
9940b97b
JS
3749 resp_data = 0;
3750 for (i = 0; i < 500; i++) {
3751 if (lpfc_readl(resp_buf, &resp_data))
3752 return;
3753 if (resp_data != mbox)
3754 mdelay(1);
3755 else
3756 break;
3757 }
9290831f
JS
3758
3759clear_errat:
3760
9940b97b
JS
3761 while (++i < 500) {
3762 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3763 return;
3764 if (!(ha_copy & HA_ERATT))
3765 mdelay(1);
3766 else
3767 break;
3768 }
9290831f
JS
3769
3770 if (readl(phba->HAregaddr) & HA_ERATT) {
3771 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 3772 phba->pport->stopped = 1;
9290831f
JS
3773 }
3774
3775restore_hc:
2e0fef85 3776 phba->link_flag &= ~LS_IGNORE_ERATT;
9290831f
JS
3777 writel(hc_copy, phba->HCregaddr);
3778 readl(phba->HCregaddr); /* flush */
3779}
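/*
 * Poll-helper sketch (hypothetical generalization of the three
 * mdelay() loops above): spin on a register until it reads the
 * expected value or the retry budget runs out.
 */
static int lpfc_example_poll_reg(uint32_t __iomem *reg, uint32_t want,
				 int tries)
{
	uint32_t val;

	while (tries--) {
		if (lpfc_readl(reg, &val))
			return -EIO;
		if (val == want)
			return 0;
		mdelay(1);
	}
	return -ETIMEDOUT;
}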
3780
e59058c4 3781/**
3621a710 3782 * lpfc_sli_brdkill - Issue a kill_board mailbox command
e59058c4
JS
3783 * @phba: Pointer to HBA context object.
3784 *
3785 * This function issues a kill_board mailbox command and waits for
3786 * the error attention interrupt. This function is called for stopping
3787 * the firmware processing. The caller is not required to hold any
3788 * locks. This function calls lpfc_hba_down_post function to free
3789 * any pending commands after the kill. The function will return 1 when it
3790 * fails to kill the board, else it will return 0.
3791 **/
41415862 3792int
2e0fef85 3793lpfc_sli_brdkill(struct lpfc_hba *phba)
41415862
JW
3794{
3795 struct lpfc_sli *psli;
3796 LPFC_MBOXQ_t *pmb;
3797 uint32_t status;
3798 uint32_t ha_copy;
3799 int retval;
3800 int i = 0;
dea3101e 3801
41415862 3802 psli = &phba->sli;
dea3101e 3803
41415862 3804 /* Kill HBA */
ed957684 3805 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011
JS
3806 "0329 Kill HBA Data: x%x x%x\n",
3807 phba->pport->port_state, psli->sli_flag);
41415862 3808
98c9ea5c
JS
3809 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3810 if (!pmb)
41415862 3811 return 1;
41415862
JW
3812
3813 /* Disable the error attention */
2e0fef85 3814 spin_lock_irq(&phba->hbalock);
9940b97b
JS
3815 if (lpfc_readl(phba->HCregaddr, &status)) {
3816 spin_unlock_irq(&phba->hbalock);
3817 mempool_free(pmb, phba->mbox_mem_pool);
3818 return 1;
3819 }
41415862
JW
3820 status &= ~HC_ERINT_ENA;
3821 writel(status, phba->HCregaddr);
3822 readl(phba->HCregaddr); /* flush */
2e0fef85
JS
3823 phba->link_flag |= LS_IGNORE_ERATT;
3824 spin_unlock_irq(&phba->hbalock);
41415862
JW
3825
3826 lpfc_kill_board(phba, pmb);
3827 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3828 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3829
3830 if (retval != MBX_SUCCESS) {
3831 if (retval != MBX_BUSY)
3832 mempool_free(pmb, phba->mbox_mem_pool);
e40a02c1
JS
3833 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3834 "2752 KILL_BOARD command failed retval %d\n",
3835 retval);
2e0fef85
JS
3836 spin_lock_irq(&phba->hbalock);
3837 phba->link_flag &= ~LS_IGNORE_ERATT;
3838 spin_unlock_irq(&phba->hbalock);
41415862
JW
3839 return 1;
3840 }
3841
f4b4c68f
JS
3842 spin_lock_irq(&phba->hbalock);
3843 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3844 spin_unlock_irq(&phba->hbalock);
9290831f 3845
41415862
JW
3846 mempool_free(pmb, phba->mbox_mem_pool);
3847
3848 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
3849 * attention every 100ms for 3 seconds. If we don't get ERATT after
3850 * 3 seconds we still set HBA_ERROR state because the status of the
3851 * board is now undefined.
3852 */
9940b97b
JS
3853 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3854 return 1;
41415862
JW
3855 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
3856 mdelay(100);
9940b97b
JS
3857 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3858 return 1;
41415862
JW
3859 }
3860
3861 del_timer_sync(&psli->mbox_tmo);
9290831f
JS
3862 if (ha_copy & HA_ERATT) {
3863 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 3864 phba->pport->stopped = 1;
9290831f 3865 }
2e0fef85 3866 spin_lock_irq(&phba->hbalock);
41415862 3867 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
04c68496 3868 psli->mbox_active = NULL;
2e0fef85
JS
3869 phba->link_flag &= ~LS_IGNORE_ERATT;
3870 spin_unlock_irq(&phba->hbalock);
41415862 3871
41415862 3872 lpfc_hba_down_post(phba);
2e0fef85 3873 phba->link_state = LPFC_HBA_ERROR;
41415862 3874
2e0fef85 3875 return ha_copy & HA_ERATT ? 0 : 1;
dea3101e 3876}
3877
e59058c4 3878/**
3772a991 3879 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
e59058c4
JS
3880 * @phba: Pointer to HBA context object.
3881 *
3882 * This function resets the HBA by writing HC_INITFF to the control
3883 * register. After the HBA resets, this function resets all the iocb ring
3884 * indices. This function disables PCI layer parity checking during
3885 * the reset.
3886 * This function returns 0 always.
3887 * The caller is not required to hold any locks.
3888 **/
41415862 3889int
2e0fef85 3890lpfc_sli_brdreset(struct lpfc_hba *phba)
dea3101e 3891{
41415862 3892 struct lpfc_sli *psli;
dea3101e 3893 struct lpfc_sli_ring *pring;
41415862 3894 uint16_t cfg_value;
dea3101e 3895 int i;
dea3101e 3896
41415862 3897 psli = &phba->sli;
dea3101e 3898
41415862
JW
3899 /* Reset HBA */
3900 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 3901 "0325 Reset HBA Data: x%x x%x\n",
2e0fef85 3902 phba->pport->port_state, psli->sli_flag);
dea3101e 3903
3904 /* perform board reset */
3905 phba->fc_eventTag = 0;
4d9ab994 3906 phba->link_events = 0;
2e0fef85
JS
3907 phba->pport->fc_myDID = 0;
3908 phba->pport->fc_prevDID = 0;
dea3101e 3909
41415862
JW
3910 /* Turn off parity checking and serr during the physical reset */
3911 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
3912 pci_write_config_word(phba->pcidev, PCI_COMMAND,
3913 (cfg_value &
3914 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3915
3772a991
JS
3916 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
3917
41415862
JW
3918 /* Now toggle INITFF bit in the Host Control Register */
3919 writel(HC_INITFF, phba->HCregaddr);
3920 mdelay(1);
3921 readl(phba->HCregaddr); /* flush */
3922 writel(0, phba->HCregaddr);
3923 readl(phba->HCregaddr); /* flush */
3924
3925 /* Restore PCI cmd register */
3926 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
dea3101e 3927
3928 /* Initialize relevant SLI info */
41415862
JW
3929 for (i = 0; i < psli->num_rings; i++) {
3930 pring = &psli->ring[i];
dea3101e 3931 pring->flag = 0;
7e56aa25
JS
3932 pring->sli.sli3.rspidx = 0;
3933 pring->sli.sli3.next_cmdidx = 0;
3934 pring->sli.sli3.local_getidx = 0;
3935 pring->sli.sli3.cmdidx = 0;
dea3101e 3936 pring->missbufcnt = 0;
3937 }
dea3101e 3938
2e0fef85 3939 phba->link_state = LPFC_WARM_START;
41415862
JW
3940 return 0;
3941}
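/*
 * Bracket sketch (hypothetical): both the SLI-3 reset above and the
 * SLI-4 reset below disable PCI parity/SERR reporting around the
 * physical reset and then restore the saved command word.
 */
static void lpfc_example_parity_bracket(struct lpfc_hba *phba,
					void (*reset)(struct lpfc_hba *))
{
	uint16_t cfg_value;

	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
	pci_write_config_word(phba->pcidev, PCI_COMMAND,
			      cfg_value & ~(PCI_COMMAND_PARITY |
					    PCI_COMMAND_SERR));
	reset(phba);
	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
}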
3942
e59058c4 3943/**
da0436e9
JS
3944 * lpfc_sli4_brdreset - Reset a sli-4 HBA
3945 * @phba: Pointer to HBA context object.
3946 *
3947 * This function resets a SLI4 HBA. This function disables PCI layer parity
3948 * checking while it resets the device. The caller is not required to hold
3949 * any locks.
3950 *
3951 * This function returns 0 on success, else a nonzero error code.
3952 **/
3953int
3954lpfc_sli4_brdreset(struct lpfc_hba *phba)
3955{
3956 struct lpfc_sli *psli = &phba->sli;
3957 uint16_t cfg_value;
27b01b82 3958 int rc;
da0436e9
JS
3959
3960 /* Reset HBA */
3961 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3962 "0295 Reset HBA Data: x%x x%x\n",
3963 phba->pport->port_state, psli->sli_flag);
3964
3965 /* perform board reset */
3966 phba->fc_eventTag = 0;
4d9ab994 3967 phba->link_events = 0;
da0436e9
JS
3968 phba->pport->fc_myDID = 0;
3969 phba->pport->fc_prevDID = 0;
3970
da0436e9
JS
3971 spin_lock_irq(&phba->hbalock);
3972 psli->sli_flag &= ~(LPFC_PROCESS_LA);
3973 phba->fcf.fcf_flag = 0;
da0436e9
JS
3974 spin_unlock_irq(&phba->hbalock);
3975
3976 /* Now physically reset the device */
3977 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3978 "0389 Performing PCI function reset!\n");
be858b65
JS
3979
3980 /* Turn off parity checking and serr during the physical reset */
3981 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
3982 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
3983 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3984
88318816 3985 /* Perform FCoE PCI function reset before freeing queue memory */
27b01b82 3986 rc = lpfc_pci_function_reset(phba);
88318816 3987 lpfc_sli4_queue_destroy(phba);
da0436e9 3988
be858b65
JS
3989 /* Restore PCI cmd register */
3990 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
3991
27b01b82 3992 return rc;
da0436e9
JS
3993}
3994
3995/**
3996 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
e59058c4
JS
3997 * @phba: Pointer to HBA context object.
3998 *
3999 * This function is called in the SLI initialization code path to
4000 * restart the HBA. The caller is not required to hold any lock.
4001 * This function writes MBX_RESTART mailbox command to the SLIM and
4002 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
4003 * function to free any pending commands. The function enables
4004 * POST only during the first initialization. The function returns zero.
4005 * The function does not guarantee completion of MBX_RESTART mailbox
4006 * command before the return of this function.
4007 **/
da0436e9
JS
4008static int
4009lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
41415862
JW
4010{
4011 MAILBOX_t *mb;
4012 struct lpfc_sli *psli;
41415862
JW
4013 volatile uint32_t word0;
4014 void __iomem *to_slim;
0d878419 4015 uint32_t hba_aer_enabled;
41415862 4016
2e0fef85 4017 spin_lock_irq(&phba->hbalock);
41415862 4018
0d878419
JS
4019 /* Take PCIe device Advanced Error Reporting (AER) state */
4020 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4021
41415862
JW
4022 psli = &phba->sli;
4023
4024 /* Restart HBA */
4025 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 4026 "0337 Restart HBA Data: x%x x%x\n",
2e0fef85 4027 phba->pport->port_state, psli->sli_flag);
41415862
JW
4028
4029 word0 = 0;
4030 mb = (MAILBOX_t *) &word0;
4031 mb->mbxCommand = MBX_RESTART;
4032 mb->mbxHc = 1;
4033
9290831f
JS
4034 lpfc_reset_barrier(phba);
4035
41415862
JW
4036 to_slim = phba->MBslimaddr;
4037 writel(*(uint32_t *) mb, to_slim);
4038 readl(to_slim); /* flush */
4039
4040 /* Only skip post after fc_ffinit is completed */
eaf15d5b 4041 if (phba->pport->port_state)
41415862 4042 word0 = 1; /* This is really setting up word1 */
eaf15d5b 4043 else
41415862 4044 word0 = 0; /* This is really setting up word1 */
65a29c16 4045 to_slim = phba->MBslimaddr + sizeof (uint32_t);
41415862
JW
4046 writel(*(uint32_t *) mb, to_slim);
4047 readl(to_slim); /* flush */
dea3101e 4048
41415862 4049 lpfc_sli_brdreset(phba);
2e0fef85
JS
4050 phba->pport->stopped = 0;
4051 phba->link_state = LPFC_INIT_START;
da0436e9 4052 phba->hba_flag = 0;
2e0fef85 4053 spin_unlock_irq(&phba->hbalock);
41415862 4054
64ba8818
JS
4055 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4056 psli->stats_start = get_seconds();
4057
eaf15d5b
JS
4058 /* Give the INITFF and Post time to settle. */
4059 mdelay(100);
41415862 4060
0d878419
JS
4061 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4062 if (hba_aer_enabled)
4063 pci_disable_pcie_error_reporting(phba->pcidev);
4064
41415862 4065 lpfc_hba_down_post(phba);
dea3101e 4066
4067 return 0;
4068}
4069
da0436e9
JS
4070/**
4071 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
4072 * @phba: Pointer to HBA context object.
4073 *
4074 * This function is called in the SLI initialization code path to restart
4075 * a SLI4 HBA. The caller is not required to hold any lock.
4076 * At the end of the function, it calls lpfc_hba_down_post function to
4077 * free any pending commands.
4078 **/
4079static int
4080lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4081{
4082 struct lpfc_sli *psli = &phba->sli;
75baf696 4083 uint32_t hba_aer_enabled;
27b01b82 4084 int rc;
da0436e9
JS
4085
4086 /* Restart HBA */
4087 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4088 "0296 Restart HBA Data: x%x x%x\n",
4089 phba->pport->port_state, psli->sli_flag);
4090
75baf696
JS
4091 /* Take PCIe device Advanced Error Reporting (AER) state */
4092 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4093
27b01b82 4094 rc = lpfc_sli4_brdreset(phba);
da0436e9
JS
4095
4096 spin_lock_irq(&phba->hbalock);
4097 phba->pport->stopped = 0;
4098 phba->link_state = LPFC_INIT_START;
4099 phba->hba_flag = 0;
4100 spin_unlock_irq(&phba->hbalock);
4101
4102 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4103 psli->stats_start = get_seconds();
4104
75baf696
JS
4105 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4106 if (hba_aer_enabled)
4107 pci_disable_pcie_error_reporting(phba->pcidev);
4108
da0436e9
JS
4109 lpfc_hba_down_post(phba);
4110
27b01b82 4111 return rc;
da0436e9
JS
4112}
4113
4114/**
4115 * lpfc_sli_brdrestart - Wrapper func for restarting hba
4116 * @phba: Pointer to HBA context object.
4117 *
4118 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
4119 * API jump table function pointer from the lpfc_hba struct.
4120**/
4121int
4122lpfc_sli_brdrestart(struct lpfc_hba *phba)
4123{
4124 return phba->lpfc_sli_brdrestart(phba);
4125}
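/*
 * Jump-table sketch (hypothetical; in the driver these pointers are
 * assigned by the SLI API setup code once the SLI revision is known):
 * something along these lines selects the routine the wrapper above
 * dispatches to.
 */
static void lpfc_example_pick_brdrestart(struct lpfc_hba *phba)
{
	phba->lpfc_sli_brdrestart = (phba->sli_rev == LPFC_SLI_REV4) ?
				    lpfc_sli_brdrestart_s4 :
				    lpfc_sli_brdrestart_s3;
}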
4126
e59058c4 4127/**
3621a710 4128 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
e59058c4
JS
4129 * @phba: Pointer to HBA context object.
4130 *
4131 * This function is called after a HBA restart to wait for successful
4132 * restart of the HBA. Successful restart of the HBA is indicated by
4133 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
4134 * iteration, the function will restart the HBA again. The function returns
4135 * zero if HBA successfully restarted else returns negative error code.
4136 **/
dea3101e 4137static int
4138lpfc_sli_chipset_init(struct lpfc_hba *phba)
4139{
4140 uint32_t status, i = 0;
4141
4142 /* Read the HBA Host Status Register */
9940b97b
JS
4143 if (lpfc_readl(phba->HSregaddr, &status))
4144 return -EIO;
dea3101e 4145
4146 /* Check status register to see what current state is */
4147 i = 0;
4148 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4149
dcf2a4e0
JS
4150 /* Check every 10ms for 10 retries, then every 100ms for 90
4151 * retries, then every 1 sec for 50 retries, for a total of
4152 * ~60 seconds before resetting the board again and checking
4153 * every 1 sec for another 50 retries. The up-to-60-second wait
4154 * before the board is ready is required for the Falcon FIPS
4155 * zeroization to complete; any board reset in between would
4156 * restart the zeroization and further delay board readiness.
dea3101e 4157 */
dcf2a4e0 4158 if (i++ >= 200) {
dea3101e 4159 /* Adapter failed to init, timeout, status reg
4160 <status> */
ed957684 4161 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 4162 "0436 Adapter failed to init, "
09372820
JS
4163 "timeout, status reg x%x, "
4164 "FW Data: A8 x%x AC x%x\n", status,
4165 readl(phba->MBslimaddr + 0xa8),
4166 readl(phba->MBslimaddr + 0xac));
2e0fef85 4167 phba->link_state = LPFC_HBA_ERROR;
dea3101e 4168 return -ETIMEDOUT;
4169 }
4170
4171 /* Check to see if any errors occurred during init */
4172 if (status & HS_FFERM) {
4173 /* ERROR: During chipset initialization */
4174 /* Adapter failed to init, chipset, status reg
4175 <status> */
ed957684 4176 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 4177 "0437 Adapter failed to init, "
09372820
JS
4178 "chipset, status reg x%x, "
4179 "FW Data: A8 x%x AC x%x\n", status,
4180 readl(phba->MBslimaddr + 0xa8),
4181 readl(phba->MBslimaddr + 0xac));
2e0fef85 4182 phba->link_state = LPFC_HBA_ERROR;
dea3101e 4183 return -EIO;
4184 }
4185
dcf2a4e0 4186 if (i <= 10)
dea3101e 4187 msleep(10);
dcf2a4e0
JS
4188 else if (i <= 100)
4189 msleep(100);
4190 else
4191 msleep(1000);
dea3101e 4192
dcf2a4e0
JS
4193 if (i == 150) {
4194 /* Do post */
92d7f7b0 4195 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
41415862 4196 lpfc_sli_brdrestart(phba);
dea3101e 4197 }
4198 /* Read the HBA Host Status Register */
9940b97b
JS
4199 if (lpfc_readl(phba->HSregaddr, &status))
4200 return -EIO;
dea3101e 4201 }
4202
4203 /* Check to see if any errors occurred during init */
4204 if (status & HS_FFERM) {
4205 /* ERROR: During chipset initialization */
4206 /* Adapter failed to init, chipset, status reg <status> */
ed957684 4207 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 4208 "0438 Adapter failed to init, chipset, "
09372820
JS
4209 "status reg x%x, "
4210 "FW Data: A8 x%x AC x%x\n", status,
4211 readl(phba->MBslimaddr + 0xa8),
4212 readl(phba->MBslimaddr + 0xac));
2e0fef85 4213 phba->link_state = LPFC_HBA_ERROR;
dea3101e 4214 return -EIO;
4215 }
4216
4217 /* Clear all interrupt enable conditions */
4218 writel(0, phba->HCregaddr);
4219 readl(phba->HCregaddr); /* flush */
4220
4221 /* setup host attn register */
4222 writel(0xffffffff, phba->HAregaddr);
4223 readl(phba->HAregaddr); /* flush */
4224 return 0;
4225}
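/*
 * Timing arithmetic for the wait loop above (derived from the code,
 * not new behavior): 10 polls * 10 ms + 90 polls * 100 ms +
 * 50 polls * 1 s ~= 59 s before the board is restarted at iteration
 * 150, then up to 50 more 1 s polls before giving up at iteration
 * 200.
 */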
4226
e59058c4 4227/**
3621a710 4228 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
e59058c4
JS
4229 *
4230 * This function calculates and returns the number of HBQs required to be
4231 * configured.
4232 **/
78b2d852 4233int
ed957684
JS
4234lpfc_sli_hbq_count(void)
4235{
92d7f7b0 4236 return ARRAY_SIZE(lpfc_hbq_defs);
ed957684
JS
4237}
4238
e59058c4 4239/**
3621a710 4240 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
e59058c4
JS
4241 *
4242 * This function adds the number of hbq entries in every HBQ to get
4243 * the total number of hbq entries required for the HBA and returns
4244 * the total count.
4245 **/
ed957684
JS
4246static int
4247lpfc_sli_hbq_entry_count(void)
4248{
4249 int hbq_count = lpfc_sli_hbq_count();
4250 int count = 0;
4251 int i;
4252
4253 for (i = 0; i < hbq_count; ++i)
92d7f7b0 4254 count += lpfc_hbq_defs[i]->entry_count;
ed957684
JS
4255 return count;
4256}
4257
e59058c4 4258/**
3621a710 4259 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
e59058c4
JS
4260 *
4261 * This function calculates amount of memory required for all hbq entries
4262 * to be configured and returns the total memory required.
4263 **/
dea3101e 4264int
ed957684
JS
4265lpfc_sli_hbq_size(void)
4266{
4267 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
4268}
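/*
 * Sizing sketch (hypothetical numbers): the HBQ memory requirement is
 * the per-HBQ entry counts summed and scaled by the entry size. For
 * example, two HBQs of 256 and 128 entries with a 16-byte entry give
 * lpfc_sli_hbq_size() == (256 + 128) * 16 = 6144 bytes.
 */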
4269
e59058c4 4270/**
3621a710 4271 * lpfc_sli_hbq_setup - configure and initialize HBQs
e59058c4
JS
4272 * @phba: Pointer to HBA context object.
4273 *
4274 * This function is called during the SLI initialization to configure
4275 * all the HBQs and post buffers to the HBQ. The caller is not
4276 * required to hold any locks. This function will return zero if successful
4277 * else it will return negative error code.
4278 **/
ed957684
JS
4279static int
4280lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4281{
4282 int hbq_count = lpfc_sli_hbq_count();
4283 LPFC_MBOXQ_t *pmb;
4284 MAILBOX_t *pmbox;
4285 uint32_t hbqno;
4286 uint32_t hbq_entry_index;
ed957684 4287
92d7f7b0
JS
4288 /* Get a Mailbox buffer to setup mailbox
4289 * commands for HBA initialization
4290 */
ed957684
JS
4291 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4292
4293 if (!pmb)
4294 return -ENOMEM;
4295
04c68496 4296 pmbox = &pmb->u.mb;
ed957684
JS
4297
4298 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
4299 phba->link_state = LPFC_INIT_MBX_CMDS;
3163f725 4300 phba->hbq_in_use = 1;
ed957684
JS
4301
4302 hbq_entry_index = 0;
4303 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4304 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4305 phba->hbqs[hbqno].hbqPutIdx = 0;
4306 phba->hbqs[hbqno].local_hbqGetIdx = 0;
4307 phba->hbqs[hbqno].entry_count =
92d7f7b0 4308 lpfc_hbq_defs[hbqno]->entry_count;
51ef4c26
JS
4309 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4310 hbq_entry_index, pmb);
ed957684
JS
4311 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4312
4313 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4314 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
4315 mbxStatus <status>, ring <num> */
4316
4317 lpfc_printf_log(phba, KERN_ERR,
92d7f7b0 4318 LOG_SLI | LOG_VPORT,
e8b62011 4319 "1805 Adapter failed to init. "
ed957684 4320 "Data: x%x x%x x%x\n",
e8b62011 4321 pmbox->mbxCommand,
ed957684
JS
4322 pmbox->mbxStatus, hbqno);
4323
4324 phba->link_state = LPFC_HBA_ERROR;
4325 mempool_free(pmb, phba->mbox_mem_pool);
6e7288d9 4326 return -ENXIO;
ed957684
JS
4327 }
4328 }
4329 phba->hbq_count = hbq_count;
4330
ed957684
JS
4331 mempool_free(pmb, phba->mbox_mem_pool);
4332
92d7f7b0 4333 /* Initially populate or replenish the HBQs */
d7c255b2
JS
4334 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
4335 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
ed957684
JS
4336 return 0;
4337}
4338
4f774513
JS
4339/**
4340 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
4341 * @phba: Pointer to HBA context object.
4342 *
4343 * This function is called during the SLI initialization to configure
4344 * all the HBQs and post buffers to the HBQ. The caller is not
4345 * required to hold any locks. This function will return zero if successful
4346 * else it will return negative error code.
4347 **/
4348static int
4349lpfc_sli4_rb_setup(struct lpfc_hba *phba)
4350{
4351 phba->hbq_in_use = 1;
4352 phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
4353 phba->hbq_count = 1;
4354 /* Initially populate or replenish the HBQs */
4355 lpfc_sli_hbqbuf_init_hbqs(phba, 0);
4356 return 0;
4357}
4358
e59058c4 4359/**
3621a710 4360 * lpfc_sli_config_port - Issue config port mailbox command
e59058c4
JS
4361 * @phba: Pointer to HBA context object.
4362 * @sli_mode: sli mode - 2/3
4363 *
4364 * This function is called by the sli initialization code path
4365 * to issue config_port mailbox command. This function restarts the
4366 * HBA firmware and issues a config_port mailbox command to configure
4367 * the SLI interface in the sli mode specified by sli_mode
4368 * variable. The caller is not required to hold any locks.
4369 * The function returns 0 if successful, else returns negative error
4370 * code.
4371 **/
9399627f
JS
4372int
4373lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
dea3101e 4374{
4375 LPFC_MBOXQ_t *pmb;
4376 uint32_t resetcount = 0, rc = 0, done = 0;
4377
4378 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4379 if (!pmb) {
2e0fef85 4380 phba->link_state = LPFC_HBA_ERROR;
dea3101e 4381 return -ENOMEM;
4382 }
4383
ed957684 4384 phba->sli_rev = sli_mode;
dea3101e 4385 while (resetcount < 2 && !done) {
2e0fef85 4386 spin_lock_irq(&phba->hbalock);
1c067a42 4387 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
2e0fef85 4388 spin_unlock_irq(&phba->hbalock);
92d7f7b0 4389 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
41415862 4390 lpfc_sli_brdrestart(phba);
dea3101e 4391 rc = lpfc_sli_chipset_init(phba);
4392 if (rc)
4393 break;
4394
2e0fef85 4395 spin_lock_irq(&phba->hbalock);
1c067a42 4396 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 4397 spin_unlock_irq(&phba->hbalock);
dea3101e 4398 resetcount++;
4399
ed957684
JS
4400 /* Call pre CONFIG_PORT mailbox command initialization. A
4401 * value of 0 means the call was successful. Any other
4402 * nonzero value is a failure, but if ERESTART is returned,
4403 * the driver may reset the HBA and try again.
4404 */
dea3101e 4405 rc = lpfc_config_port_prep(phba);
4406 if (rc == -ERESTART) {
ed957684 4407 phba->link_state = LPFC_LINK_UNKNOWN;
dea3101e 4408 continue;
34b02dcd 4409 } else if (rc)
dea3101e 4410 break;
6d368e53 4411
2e0fef85 4412 phba->link_state = LPFC_INIT_MBX_CMDS;
dea3101e 4413 lpfc_config_port(phba, pmb);
4414 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
34b02dcd
JS
4415 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
4416 LPFC_SLI3_HBQ_ENABLED |
4417 LPFC_SLI3_CRP_ENABLED |
bc73905a
JS
4418 LPFC_SLI3_BG_ENABLED |
4419 LPFC_SLI3_DSS_ENABLED);
ed957684 4420 if (rc != MBX_SUCCESS) {
dea3101e 4421 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 4422 "0442 Adapter failed to init, mbxCmd x%x "
92d7f7b0 4423 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
04c68496 4424 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
2e0fef85 4425 spin_lock_irq(&phba->hbalock);
04c68496 4426 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
2e0fef85
JS
4427 spin_unlock_irq(&phba->hbalock);
4428 rc = -ENXIO;
04c68496
JS
4429 } else {
4430 /* Allow asynchronous mailbox command to go through */
4431 spin_lock_irq(&phba->hbalock);
4432 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4433 spin_unlock_irq(&phba->hbalock);
ed957684 4434 done = 1;
cb69f7de
JS
4435
4436 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
4437 (pmb->u.mb.un.varCfgPort.gasabt == 0))
4438 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4439 "3110 Port did not grant ASABT\n");
04c68496 4440 }
dea3101e 4441 }
ed957684
JS
4442 if (!done) {
4443 rc = -EINVAL;
4444 goto do_prep_failed;
4445 }
04c68496
JS
4446 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
4447 if (!pmb->u.mb.un.varCfgPort.cMA) {
34b02dcd
JS
4448 rc = -ENXIO;
4449 goto do_prep_failed;
4450 }
04c68496 4451 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
34b02dcd 4452 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
04c68496
JS
4453 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
4454 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
4455 phba->max_vpi : phba->max_vports;
4456
34b02dcd
JS
4457 } else
4458 phba->max_vpi = 0;
bc73905a
JS
4459 phba->fips_level = 0;
4460 phba->fips_spec_rev = 0;
4461 if (pmb->u.mb.un.varCfgPort.gdss) {
04c68496 4462 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
bc73905a
JS
4463 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
4464 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
4465 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4466 "2850 Security Crypto Active. FIPS x%d "
4467 "(Spec Rev: x%d)",
4468 phba->fips_level, phba->fips_spec_rev);
4469 }
4470 if (pmb->u.mb.un.varCfgPort.sec_err) {
4471 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4472 "2856 Config Port Security Crypto "
4473 "Error: x%x ",
4474 pmb->u.mb.un.varCfgPort.sec_err);
4475 }
04c68496 4476 if (pmb->u.mb.un.varCfgPort.gerbm)
34b02dcd 4477 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
04c68496 4478 if (pmb->u.mb.un.varCfgPort.gcrp)
34b02dcd 4479 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
6e7288d9
JS
4480
4481 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
4482 phba->port_gp = phba->mbox->us.s3_pgp.port;
e2a0a9d6
JS
4483
4484 if (phba->cfg_enable_bg) {
04c68496 4485 if (pmb->u.mb.un.varCfgPort.gbg)
e2a0a9d6
JS
4486 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
4487 else
4488 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4489 "0443 Adapter did not grant "
4490 "BlockGuard\n");
4491 }
34b02dcd 4492 } else {
8f34f4ce 4493 phba->hbq_get = NULL;
34b02dcd 4494 phba->port_gp = phba->mbox->us.s2.port;
d7c255b2 4495 phba->max_vpi = 0;
ed957684 4496 }
92d7f7b0 4497do_prep_failed:
ed957684
JS
4498 mempool_free(pmb, phba->mbox_mem_pool);
4499 return rc;
4500}
4501
e59058c4
JS
4502
4503/**
3621a710 4504 * lpfc_sli_hba_setup - SLI initialization function
e59058c4
JS
4505 * @phba: Pointer to HBA context object.
4506 *
4507 * This function is the main SLI initialization function. This function
4508 * is called by the HBA initialization code, HBA reset code and HBA
4509 * error attention handler code. Caller is not required to hold any
4510 * locks. This function issues config_port mailbox command to configure
4511 * the SLI, setup iocb rings and HBQ rings. In the end the function
4512 * calls the config_port_post function to issue init_link mailbox
4513 * command and to start the discovery. The function will return zero
4514 * if successful, else it will return negative error code.
4515 **/
ed957684
JS
4516int
4517lpfc_sli_hba_setup(struct lpfc_hba *phba)
4518{
4519 uint32_t rc;
6d368e53
JS
4520 int mode = 3, i;
4521 int longs;
ed957684
JS
4522
4523 switch (lpfc_sli_mode) {
4524 case 2:
78b2d852 4525 if (phba->cfg_enable_npiv) {
92d7f7b0 4526 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
e8b62011 4527 "1824 NPIV enabled: Override lpfc_sli_mode "
92d7f7b0 4528 "parameter (%d) to auto (0).\n",
e8b62011 4529 lpfc_sli_mode);
92d7f7b0
JS
4530 break;
4531 }
ed957684
JS
4532 mode = 2;
4533 break;
4534 case 0:
4535 case 3:
4536 break;
4537 default:
92d7f7b0 4538 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
e8b62011
JS
4539 "1819 Unrecognized lpfc_sli_mode "
4540 "parameter: %d.\n", lpfc_sli_mode);
ed957684
JS
4541
4542 break;
4543 }
4544
9399627f
JS
4545 rc = lpfc_sli_config_port(phba, mode);
4546
ed957684 4547 if (rc && lpfc_sli_mode == 3)
92d7f7b0 4548 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
e8b62011
JS
4549 "1820 Unable to select SLI-3. "
4550 "Not supported by adapter.\n");
ed957684 4551 if (rc && mode != 2)
9399627f 4552 rc = lpfc_sli_config_port(phba, 2);
ed957684 4553 if (rc)
dea3101e 4554 goto lpfc_sli_hba_setup_error;
4555
0d878419
JS
4556 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
4557 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
4558 rc = pci_enable_pcie_error_reporting(phba->pcidev);
4559 if (!rc) {
4560 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4561 "2709 This device supports "
4562 "Advanced Error Reporting (AER)\n");
4563 spin_lock_irq(&phba->hbalock);
4564 phba->hba_flag |= HBA_AER_ENABLED;
4565 spin_unlock_irq(&phba->hbalock);
4566 } else {
4567 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4568 "2708 This device does not support "
4569 "Advanced Error Reporting (AER)\n");
4570 phba->cfg_aer_support = 0;
4571 }
4572 }
4573
ed957684
JS
4574 if (phba->sli_rev == 3) {
4575 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
4576 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
ed957684
JS
4577 } else {
4578 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
4579 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
92d7f7b0 4580 phba->sli3_options = 0;
ed957684
JS
4581 }
4582
4583 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
e8b62011
JS
4584 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
4585 phba->sli_rev, phba->max_vpi);
ed957684 4586 rc = lpfc_sli_ring_map(phba);
dea3101e 4587
4588 if (rc)
4589 goto lpfc_sli_hba_setup_error;
4590
6d368e53
JS
4591 /* Initialize VPIs. */
4592 if (phba->sli_rev == LPFC_SLI_REV3) {
4593 /*
4594 * The VPI bitmask and physical ID array are allocated
4595 * and initialized once only - at driver load. A port
4596 * reset doesn't need to reinitialize this memory.
4597 */
4598 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
4599 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
4600 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
4601 GFP_KERNEL);
4602 if (!phba->vpi_bmask) {
4603 rc = -ENOMEM;
4604 goto lpfc_sli_hba_setup_error;
4605 }
4606
4607 phba->vpi_ids = kzalloc(
4608 (phba->max_vpi+1) * sizeof(uint16_t),
4609 GFP_KERNEL);
4610 if (!phba->vpi_ids) {
4611 kfree(phba->vpi_bmask);
4612 rc = -ENOMEM;
4613 goto lpfc_sli_hba_setup_error;
4614 }
4615 for (i = 0; i < phba->max_vpi; i++)
4616 phba->vpi_ids[i] = i;
4617 }
4618 }
4619
9399627f 4620 /* Init HBQs */
ed957684
JS
4621 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
4622 rc = lpfc_sli_hbq_setup(phba);
4623 if (rc)
4624 goto lpfc_sli_hba_setup_error;
4625 }
04c68496 4626 spin_lock_irq(&phba->hbalock);
dea3101e 4627 phba->sli.sli_flag |= LPFC_PROCESS_LA;
04c68496 4628 spin_unlock_irq(&phba->hbalock);
dea3101e 4629
4630 rc = lpfc_config_port_post(phba);
4631 if (rc)
4632 goto lpfc_sli_hba_setup_error;
4633
ed957684
JS
4634 return rc;
4635
92d7f7b0 4636lpfc_sli_hba_setup_error:
2e0fef85 4637 phba->link_state = LPFC_HBA_ERROR;
e40a02c1 4638 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 4639 "0445 Firmware initialization failed\n");
dea3101e 4640 return rc;
4641}
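/*
 * Bitmap-sizing arithmetic for the VPI allocation above (derived):
 * longs = (max_vpi + BITS_PER_LONG) / BITS_PER_LONG rounds the bit
 * count up to whole longs. E.g. with max_vpi = 255 on a 64-bit
 * kernel, (255 + 64) / 64 = 4 longs = 256 bits, covering VPIs
 * 0..255.
 */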
4642
e59058c4 4643/**
da0436e9
JS
4644 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
4645 * @phba: Pointer to HBA context object.
4646 *
4647 * This function issues a dump mailbox command to read config region
4648 * 23, parses the records in the region, and populates the driver
4649 * data structure.
e59058c4 4650 **/
da0436e9 4651static int
ff78d8f9 4652lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
dea3101e 4653{
ff78d8f9 4654 LPFC_MBOXQ_t *mboxq;
da0436e9
JS
4655 struct lpfc_dmabuf *mp;
4656 struct lpfc_mqe *mqe;
4657 uint32_t data_length;
4658 int rc;
dea3101e 4659
da0436e9
JS
4660 /* Program the default value of vlan_id and fc_map */
4661 phba->valid_vlan = 0;
4662 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4663 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4664 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
2e0fef85 4665
ff78d8f9
JS
4666 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4667 if (!mboxq)
da0436e9
JS
4668 return -ENOMEM;
4669
ff78d8f9
JS
4670 mqe = &mboxq->u.mqe;
4671 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
4672 rc = -ENOMEM;
4673 goto out_free_mboxq;
4674 }
4675
da0436e9
JS
4676 mp = (struct lpfc_dmabuf *) mboxq->context1;
4677 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4678
4679 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4680 "(%d):2571 Mailbox cmd x%x Status x%x "
4681 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4682 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4683 "CQ: x%x x%x x%x x%x\n",
4684 mboxq->vport ? mboxq->vport->vpi : 0,
4685 bf_get(lpfc_mqe_command, mqe),
4686 bf_get(lpfc_mqe_status, mqe),
4687 mqe->un.mb_words[0], mqe->un.mb_words[1],
4688 mqe->un.mb_words[2], mqe->un.mb_words[3],
4689 mqe->un.mb_words[4], mqe->un.mb_words[5],
4690 mqe->un.mb_words[6], mqe->un.mb_words[7],
4691 mqe->un.mb_words[8], mqe->un.mb_words[9],
4692 mqe->un.mb_words[10], mqe->un.mb_words[11],
4693 mqe->un.mb_words[12], mqe->un.mb_words[13],
4694 mqe->un.mb_words[14], mqe->un.mb_words[15],
4695 mqe->un.mb_words[16], mqe->un.mb_words[50],
4696 mboxq->mcqe.word0,
4697 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
4698 mboxq->mcqe.trailer);
4699
4700 if (rc) {
4701 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4702 kfree(mp);
ff78d8f9
JS
4703 rc = -EIO;
4704 goto out_free_mboxq;
da0436e9
JS
4705 }
4706 data_length = mqe->un.mb_words[5];
a0c87cbd 4707 if (data_length > DMP_RGN23_SIZE) {
d11e31dd
JS
4708 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4709 kfree(mp);
ff78d8f9
JS
4710 rc = -EIO;
4711 goto out_free_mboxq;
d11e31dd 4712 }
dea3101e 4713
da0436e9
JS
4714 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
4715 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4716 kfree(mp);
ff78d8f9
JS
4717 rc = 0;
4718
4719out_free_mboxq:
4720 mempool_free(mboxq, phba->mbox_mem_pool);
4721 return rc;
da0436e9 4722}
e59058c4
JS
4723
4724/**
da0436e9
JS
4725 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
4726 * @phba: pointer to lpfc hba data structure.
4727 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
4728 * @vpd: pointer to the memory to hold resulting port vpd data.
4729 * @vpd_size: On input, the number of bytes allocated to @vpd.
4730 * On output, the number of data bytes in @vpd.
e59058c4 4731 *
da0436e9
JS
4732 * This routine executes a READ_REV SLI4 mailbox command. In
4733 * addition, this routine gets the port vpd data.
4734 *
4735 * Return codes
af901ca1 4736 * 0 - successful
d439d286 4737 * -ENOMEM - could not allocate memory.
e59058c4 4738 **/
da0436e9
JS
4739static int
4740lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
4741 uint8_t *vpd, uint32_t *vpd_size)
dea3101e 4742{
da0436e9
JS
4743 int rc = 0;
4744 uint32_t dma_size;
4745 struct lpfc_dmabuf *dmabuf;
4746 struct lpfc_mqe *mqe;
dea3101e 4747
da0436e9
JS
4748 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4749 if (!dmabuf)
4750 return -ENOMEM;
4751
4752 /*
4753 * Get a DMA buffer for the vpd data resulting from the READ_REV
4754 * mailbox command.
a257bf90 4755 */
da0436e9
JS
4756 dma_size = *vpd_size;
4757 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4758 dma_size,
4759 &dmabuf->phys,
4760 GFP_KERNEL);
4761 if (!dmabuf->virt) {
4762 kfree(dmabuf);
4763 return -ENOMEM;
4764 }
4765 memset(dmabuf->virt, 0, dma_size);
4766
4767 /*
4768 * The SLI4 implementation of READ_REV conflicts at word1,
4769 * bits 31:16 and SLI4 adds vpd functionality not present
4770 * in SLI3. This code corrects the conflicts.
4771 */
4772 lpfc_read_rev(phba, mboxq);
4773 mqe = &mboxq->u.mqe;
4774 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
4775 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
4776 mqe->un.read_rev.word1 &= 0x0000FFFF;
4777 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
4778 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
4779
4780 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4781 if (rc) {
4782 dma_free_coherent(&phba->pcidev->dev, dma_size,
4783 dmabuf->virt, dmabuf->phys);
4784 kfree(dmabuf);
4785 return -EIO;
4786 }
4787
4788 /*
4789 * The available vpd length cannot be bigger than the
4790 * DMA buffer passed to the port. Catch the less than
4791 * case and update the caller's size.
4792 */
4793 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
4794 *vpd_size = mqe->un.read_rev.avail_vpd_len;
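	/*
	 * Example (sizes illustrative only): if the caller passed a
	 * 4096-byte buffer but the port reports only 512 valid vpd bytes,
	 * *vpd_size is reduced to 512 and only that many bytes are copied
	 * out below.
	 */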
4795
4796 memcpy(vpd, dmabuf->virt, *vpd_size);
4797
4798 dma_free_coherent(&phba->pcidev->dev, dma_size,
4799 dmabuf->virt, dmabuf->phys);
4800 kfree(dmabuf);
4801 return 0;
4802}
4803
4804/**
4805 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
4806 * @phba: pointer to lpfc hba data structure.
4807 *
4808 * This routine retrieves the SLI4 device physical port name that this
4809 * PCI function is attached to.
4810 *
4811 * Return codes
4812 * 0 - successful
4813 * otherwise - failed to retrieve physical port name
4814 **/
4815static int
4816lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
4817{
4818 LPFC_MBOXQ_t *mboxq;
4819 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
4820 struct lpfc_controller_attribute *cntl_attr;
4821 struct lpfc_mbx_get_port_name *get_port_name;
4822 void *virtaddr = NULL;
4823 uint32_t alloclen, reqlen;
4824 uint32_t shdr_status, shdr_add_status;
4825 union lpfc_sli4_cfg_shdr *shdr;
4826 char cport_name = 0;
4827 int rc;
4828
4829 /* We assume nothing at this point */
4830 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
4831 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
4832
4833 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4834 if (!mboxq)
4835 return -ENOMEM;
4836 /* obtain link type and link number via READ_CONFIG */
4837 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
4838 lpfc_sli4_read_config(phba);
4839 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
4840 goto retrieve_ppname;
4841
4842 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
4843 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
4844 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
4845 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
4846 LPFC_SLI4_MBX_NEMBED);
4847 if (alloclen < reqlen) {
4848 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4849 "3084 Allocated DMA memory size (%d) is "
4850 "less than the requested DMA memory size "
4851 "(%d)\n", alloclen, reqlen);
4852 rc = -ENOMEM;
4853 goto out_free_mboxq;
4854 }
4855 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4856 virtaddr = mboxq->sge_array->addr[0];
4857 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
4858 shdr = &mbx_cntl_attr->cfg_shdr;
4859 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
4860 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
4861 if (shdr_status || shdr_add_status || rc) {
4862 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4863 "3085 Mailbox x%x (x%x/x%x) failed, "
4864 "rc:x%x, status:x%x, add_status:x%x\n",
4865 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4866 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
4867 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
4868 rc, shdr_status, shdr_add_status);
4869 rc = -ENXIO;
4870 goto out_free_mboxq;
4871 }
4872 cntl_attr = &mbx_cntl_attr->cntl_attr;
4873 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
4874 phba->sli4_hba.lnk_info.lnk_tp =
4875 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
4876 phba->sli4_hba.lnk_info.lnk_no =
4877 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
4878 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4879 "3086 lnk_type:%d, lnk_numb:%d\n",
4880 phba->sli4_hba.lnk_info.lnk_tp,
4881 phba->sli4_hba.lnk_info.lnk_no);
4882
4883retrieve_ppname:
4884 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
4885 LPFC_MBOX_OPCODE_GET_PORT_NAME,
4886 sizeof(struct lpfc_mbx_get_port_name) -
4887 sizeof(struct lpfc_sli4_cfg_mhdr),
4888 LPFC_SLI4_MBX_EMBED);
4889 get_port_name = &mboxq->u.mqe.un.get_port_name;
4890 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
4891 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
4892 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
4893 phba->sli4_hba.lnk_info.lnk_tp);
4894 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4895 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
4896 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
4897 if (shdr_status || shdr_add_status || rc) {
4898 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4899 "3087 Mailbox x%x (x%x/x%x) failed: "
4900 "rc:x%x, status:x%x, add_status:x%x\n",
4901 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4902 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
4903 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
4904 rc, shdr_status, shdr_add_status);
4905 rc = -ENXIO;
4906 goto out_free_mboxq;
4907 }
4908 switch (phba->sli4_hba.lnk_info.lnk_no) {
4909 case LPFC_LINK_NUMBER_0:
4910 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
4911 &get_port_name->u.response);
4912 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4913 break;
4914 case LPFC_LINK_NUMBER_1:
4915 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
4916 &get_port_name->u.response);
4917 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4918 break;
4919 case LPFC_LINK_NUMBER_2:
4920 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
4921 &get_port_name->u.response);
4922 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4923 break;
4924 case LPFC_LINK_NUMBER_3:
4925 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
4926 &get_port_name->u.response);
4927 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4928 break;
4929 default:
4930 break;
4931 }
4932
4933 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
4934 phba->Port[0] = cport_name;
4935 phba->Port[1] = '\0';
4936 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4937 "3091 SLI get port name: %s\n", phba->Port);
4938 }
4939
4940out_free_mboxq:
4941 if (rc != MBX_TIMEOUT) {
4942 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
4943 lpfc_sli4_mbox_cmd_free(phba, mboxq);
4944 else
4945 mempool_free(mboxq, phba->mbox_mem_pool);
4946 }
4947 return rc;
4948}
4949
4950/**
4951 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
4952 * @phba: pointer to lpfc hba data structure.
4953 *
4954 * This routine is called to explicitly arm the SLI4 device's completion and
4955 * event queues
4956 **/
4957static void
4958lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4959{
4960 int fcp_eqidx;
4961
4962 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
4963 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
4964 fcp_eqidx = 0;
4965 if (phba->sli4_hba.fcp_cq) {
4966 do {
4967 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
4968 LPFC_QUEUE_REARM);
4969 } while (++fcp_eqidx < phba->cfg_fcp_io_channel);
4970 }
4971 if (phba->sli4_hba.hba_eq) {
4972 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
4973 fcp_eqidx++)
4974 lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx],
4975 LPFC_QUEUE_REARM);
4976 }
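	/*
	 * Releasing a queue with LPFC_QUEUE_REARM rings its doorbell with
	 * the arm bit set, re-enabling interrupt generation for the next
	 * completion or event on that queue.
	 */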
4977}
4978
4979/**
4980 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
4981 * @phba: Pointer to HBA context object.
4982 * @type: The resource extent type.
4983 * @extnt_count: buffer to hold port available extent count.
4984 * @extnt_size: buffer to hold element count per extent.
4985 *
4986 * This function calls the port and retrieves the number of available
4987 * extents and their size for a particular extent type.
4988 *
4989 * Returns: 0 if successful. Nonzero otherwise.
4990 **/
4991int
4992lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
4993 uint16_t *extnt_count, uint16_t *extnt_size)
4994{
4995 int rc = 0;
4996 uint32_t length;
4997 uint32_t mbox_tmo;
4998 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
4999 LPFC_MBOXQ_t *mbox;
5000
5001 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5002 if (!mbox)
5003 return -ENOMEM;
5004
5005 /* Find out how many extents are available for this resource type */
5006 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
5007 sizeof(struct lpfc_sli4_cfg_mhdr));
5008 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5009 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
5010 length, LPFC_SLI4_MBX_EMBED);
5011
5012 /* Send an extents count of 0 - the GET doesn't use it. */
5013 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5014 LPFC_SLI4_MBX_EMBED);
5015 if (unlikely(rc)) {
5016 rc = -EIO;
5017 goto err_exit;
5018 }
5019
5020 if (!phba->sli4_hba.intr_enable)
5021 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5022 else {
5023 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5024 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5025 }
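	/*
	 * Note: this polled-vs-waited mailbox pattern recurs throughout the
	 * extent routines. Early in initialization (or after a reset) device
	 * interrupts are not yet enabled, so the command is issued in
	 * MBX_POLL mode and completion is detected by polling; once
	 * intr_enable is set, the caller can sleep in
	 * lpfc_sli_issue_mbox_wait() up to the command-specific timeout
	 * returned by lpfc_mbox_tmo_val().
	 */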
5026 if (unlikely(rc)) {
5027 rc = -EIO;
5028 goto err_exit;
5029 }
5030
5031 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5032 if (bf_get(lpfc_mbox_hdr_status,
5033 &rsrc_info->header.cfg_shdr.response)) {
5034 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5035 "2930 Failed to get resource extents "
5036 "Status 0x%x Add'l Status 0x%x\n",
5037 bf_get(lpfc_mbox_hdr_status,
5038 &rsrc_info->header.cfg_shdr.response),
5039 bf_get(lpfc_mbox_hdr_add_status,
5040 &rsrc_info->header.cfg_shdr.response));
5041 rc = -EIO;
5042 goto err_exit;
5043 }
5044
5045 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5046 &rsrc_info->u.rsp);
5047 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5048 &rsrc_info->u.rsp);
5049
5050 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5051 "3162 Retrieved extents type-%d from port: count:%d, "
5052 "size:%d\n", type, *extnt_count, *extnt_size);
5053
5054err_exit:
5055 mempool_free(mbox, phba->mbox_mem_pool);
5056 return rc;
5057}
5058
5059/**
5060 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
5061 * @phba: Pointer to HBA context object.
5062 * @type: The extent type to check.
5063 *
5064 * This function reads the current available extents from the port and checks
5065 * if the extent count or extent size has changed since the last access.
5066 * Callers use this routine after a port reset to determine whether there
5067 * is an extent reprovisioning requirement.
5068 *
5069 * Returns:
5070 * -Error: error indicates problem.
5071 * 1: Extent count or size has changed.
5072 * 0: No changes.
5073 **/
5074static int
5075lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5076{
5077 uint16_t curr_ext_cnt, rsrc_ext_cnt;
5078 uint16_t size_diff, rsrc_ext_size;
5079 int rc = 0;
5080 struct lpfc_rsrc_blks *rsrc_entry;
5081 struct list_head *rsrc_blk_list = NULL;
5082
5083 size_diff = 0;
5084 curr_ext_cnt = 0;
5085 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5086 &rsrc_ext_cnt,
5087 &rsrc_ext_size);
5088 if (unlikely(rc))
5089 return -EIO;
5090
5091 switch (type) {
5092 case LPFC_RSC_TYPE_FCOE_RPI:
5093 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5094 break;
5095 case LPFC_RSC_TYPE_FCOE_VPI:
5096 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5097 break;
5098 case LPFC_RSC_TYPE_FCOE_XRI:
5099 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5100 break;
5101 case LPFC_RSC_TYPE_FCOE_VFI:
5102 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5103 break;
5104 default:
5105 break;
5106 }
5107
5108 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5109 curr_ext_cnt++;
5110 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5111 size_diff++;
5112 }
5113
5114 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5115 rc = 1;
5116
5117 return rc;
5118}
5119
5120/**
5121 * lpfc_sli4_cfg_post_extnts - Post the resource extent allocation request
5122 * @phba: Pointer to HBA context object.
5123 * @extnt_cnt: number of available extents.
5124 * @type: the extent type (rpi, xri, vfi, vpi).
5125 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
5126 * @mbox: pointer to the caller's allocated mailbox structure.
5127 *
5128 * This function executes the extents allocation request. It also
5129 * takes care of the amount of memory needed to allocate or get the
5130 * allocated extents. It is the caller's responsibility to evaluate
5131 * the response.
5132 *
5133 * Returns:
5134 * -Error: Error value describes the condition found.
5135 * 0: if successful
5136 **/
5137static int
5138lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
5139 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5140{
5141 int rc = 0;
5142 uint32_t req_len;
5143 uint32_t emb_len;
5144 uint32_t alloc_len, mbox_tmo;
5145
5146 /* Calculate the total requested length of the dma memory */
5147 req_len = extnt_cnt * sizeof(uint16_t);
5148
5149 /*
5150 * Calculate the size of an embedded mailbox. The uint32_t
5151 * accounts for the extents-specific word.
5152 */
5153 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5154 sizeof(uint32_t);
5155
5156 /*
5157 * Presume the allocation and response will fit into an embedded
5158 * mailbox. If not true, reconfigure to a non-embedded mailbox.
5159 */
5160 *emb = LPFC_SLI4_MBX_EMBED;
5161 if (req_len > emb_len) {
5162 req_len = extnt_cnt * sizeof(uint16_t) +
5163 sizeof(union lpfc_sli4_cfg_shdr) +
5164 sizeof(uint32_t);
5165 *emb = LPFC_SLI4_MBX_NEMBED;
5166 }
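	/*
	 * Worked example (illustrative figures, not taken from the headers):
	 * if sizeof(MAILBOX_t) were 256 and sizeof(struct mbox_header) 24,
	 * emb_len would be 256 - 24 - 4 = 228 bytes. A request for 100
	 * extents needs 100 * sizeof(uint16_t) = 200 bytes and still fits
	 * embedded; 200 extents would need 400 bytes plus the shdr overhead
	 * and force the non-embedded (external SGE) mailbox form.
	 */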
5167
5168 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5169 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5170 req_len, *emb);
5171 if (alloc_len < req_len) {
5172 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5173 "2982 Allocated DMA memory size (x%x) is "
5174 "less than the requested DMA memory "
5175 "size (x%x)\n", alloc_len, req_len);
5176 return -ENOMEM;
5177 }
5178 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
5179 if (unlikely(rc))
5180 return -EIO;
5181
5182 if (!phba->sli4_hba.intr_enable)
5183 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5184 else {
5185 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5186 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5187 }
5188
5189 if (unlikely(rc))
5190 rc = -EIO;
5191 return rc;
5192}
5193
5194/**
5195 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
5196 * @phba: Pointer to HBA context object.
5197 * @type: The resource extent type to allocate.
5198 *
5199 * This function allocates the number of elements for the specified
5200 * resource type.
5201 **/
5202static int
5203lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5204{
5205 bool emb = false;
5206 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5207 uint16_t rsrc_id, rsrc_start, j, k;
5208 uint16_t *ids;
5209 int i, rc;
5210 unsigned long longs;
5211 unsigned long *bmask;
5212 struct lpfc_rsrc_blks *rsrc_blks;
5213 LPFC_MBOXQ_t *mbox;
5214 uint32_t length;
5215 struct lpfc_id_range *id_array = NULL;
5216 void *virtaddr = NULL;
5217 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5218 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5219 struct list_head *ext_blk_list;
5220
5221 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5222 &rsrc_cnt,
5223 &rsrc_size);
5224 if (unlikely(rc))
5225 return -EIO;
5226
5227 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5228 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5229 "3009 No available Resource Extents "
5230 "for resource type 0x%x: Count: 0x%x, "
5231 "Size 0x%x\n", type, rsrc_cnt,
5232 rsrc_size);
5233 return -ENOMEM;
5234 }
5235
5236 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5237 "2903 Post resource extents type-0x%x: "
5238 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
5239
5240 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5241 if (!mbox)
5242 return -ENOMEM;
5243
5244 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
5245 if (unlikely(rc)) {
5246 rc = -EIO;
5247 goto err_exit;
5248 }
5249
5250 /*
5251 * Figure out where the response is located. Then get local pointers
5252 * to the response data. The port does not guarantee to respond to the
5253 * full requested extent count, so update the local variable with the
5254 * count actually allocated by the port.
5255 */
5256 if (emb == LPFC_SLI4_MBX_EMBED) {
5257 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5258 id_array = &rsrc_ext->u.rsp.id[0];
5259 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5260 } else {
5261 virtaddr = mbox->sge_array->addr[0];
5262 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5263 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5264 id_array = &n_rsrc->id;
5265 }
5266
5267 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
5268 rsrc_id_cnt = rsrc_cnt * rsrc_size;
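	/*
	 * The "longs" computation above is a round-up division so the
	 * bitmask covers every id: e.g. on a 64-bit host, 4 extents of
	 * 50 ids give rsrc_id_cnt = 200 and longs = (200 + 63) / 64 = 4,
	 * i.e. 256 bits of bitmask for 200 ids.
	 */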
5269
5270 /*
5271 * Based on the resource size and count, correct the base and max
5272 * resource values.
5273 */
5274 length = sizeof(struct lpfc_rsrc_blks);
5275 switch (type) {
5276 case LPFC_RSC_TYPE_FCOE_RPI:
5277 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5278 sizeof(unsigned long),
5279 GFP_KERNEL);
5280 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5281 rc = -ENOMEM;
5282 goto err_exit;
5283 }
5284 phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
5285 sizeof(uint16_t),
5286 GFP_KERNEL);
5287 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5288 kfree(phba->sli4_hba.rpi_bmask);
5289 rc = -ENOMEM;
5290 goto err_exit;
5291 }
5292
5293 /*
5294 * The next_rpi was initialized with the maximum available
5295 * count but the port may allocate a smaller number. Catch
5296 * that case and update the next_rpi.
5297 */
5298 phba->sli4_hba.next_rpi = rsrc_id_cnt;
5299
5300 /* Initialize local ptrs for common extent processing later. */
5301 bmask = phba->sli4_hba.rpi_bmask;
5302 ids = phba->sli4_hba.rpi_ids;
5303 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5304 break;
5305 case LPFC_RSC_TYPE_FCOE_VPI:
5306 phba->vpi_bmask = kzalloc(longs *
5307 sizeof(unsigned long),
5308 GFP_KERNEL);
5309 if (unlikely(!phba->vpi_bmask)) {
5310 rc = -ENOMEM;
5311 goto err_exit;
5312 }
5313 phba->vpi_ids = kzalloc(rsrc_id_cnt *
5314 sizeof(uint16_t),
5315 GFP_KERNEL);
5316 if (unlikely(!phba->vpi_ids)) {
5317 kfree(phba->vpi_bmask);
5318 rc = -ENOMEM;
5319 goto err_exit;
5320 }
5321
5322 /* Initialize local ptrs for common extent processing later. */
5323 bmask = phba->vpi_bmask;
5324 ids = phba->vpi_ids;
5325 ext_blk_list = &phba->lpfc_vpi_blk_list;
5326 break;
5327 case LPFC_RSC_TYPE_FCOE_XRI:
5328 phba->sli4_hba.xri_bmask = kzalloc(longs *
5329 sizeof(unsigned long),
5330 GFP_KERNEL);
5331 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5332 rc = -ENOMEM;
5333 goto err_exit;
5334 }
5335 phba->sli4_hba.max_cfg_param.xri_used = 0;
5336 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
5337 sizeof(uint16_t),
5338 GFP_KERNEL);
5339 if (unlikely(!phba->sli4_hba.xri_ids)) {
5340 kfree(phba->sli4_hba.xri_bmask);
5341 rc = -ENOMEM;
5342 goto err_exit;
5343 }
5344
5345 /* Initialize local ptrs for common extent processing later. */
5346 bmask = phba->sli4_hba.xri_bmask;
5347 ids = phba->sli4_hba.xri_ids;
5348 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5349 break;
5350 case LPFC_RSC_TYPE_FCOE_VFI:
5351 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5352 sizeof(unsigned long),
5353 GFP_KERNEL);
5354 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5355 rc = -ENOMEM;
5356 goto err_exit;
5357 }
5358 phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
5359 sizeof(uint16_t),
5360 GFP_KERNEL);
5361 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5362 kfree(phba->sli4_hba.vfi_bmask);
5363 rc = -ENOMEM;
5364 goto err_exit;
5365 }
5366
5367 /* Initialize local ptrs for common extent processing later. */
5368 bmask = phba->sli4_hba.vfi_bmask;
5369 ids = phba->sli4_hba.vfi_ids;
5370 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5371 break;
5372 default:
5373 /* Unsupported Opcode. Fail call. */
5374 id_array = NULL;
5375 bmask = NULL;
5376 ids = NULL;
5377 ext_blk_list = NULL;
5378 goto err_exit;
5379 }
5380
5381 /*
5382 * Complete initializing the extent configuration with the
5383 * allocated ids assigned to this function. The bitmask serves
5384 * as an index into the array and manages the available ids. The
5385 * array just stores the ids communicated to the port via the wqes.
5386 */
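	/*
	 * Illustrative sketch (not driver code) of how these two structures
	 * are consumed later: a free index is taken from the bitmask and
	 * used to look up the port-assigned id, roughly
	 *
	 *	idx = find_next_zero_bit(bmask, max_ids, 0);
	 *	set_bit(idx, bmask);
	 *	port_id = ids[idx];
	 *
	 * ('max_ids' is a placeholder name.) Also note each word of the
	 * response id_array packs two 16-bit ids, which is why the loop
	 * below advances 'k' only on odd 'i'.
	 */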
5387 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
5388 if ((i % 2) == 0)
5389 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
5390 &id_array[k]);
5391 else
5392 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
5393 &id_array[k]);
5394
5395 rsrc_blks = kzalloc(length, GFP_KERNEL);
5396 if (unlikely(!rsrc_blks)) {
5397 rc = -ENOMEM;
5398 kfree(bmask);
5399 kfree(ids);
5400 goto err_exit;
5401 }
5402 rsrc_blks->rsrc_start = rsrc_id;
5403 rsrc_blks->rsrc_size = rsrc_size;
5404 list_add_tail(&rsrc_blks->list, ext_blk_list);
5405 rsrc_start = rsrc_id;
5406 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0))
5407 phba->sli4_hba.scsi_xri_start = rsrc_start +
5408 lpfc_sli4_get_els_iocb_cnt(phba);
5409
5410 while (rsrc_id < (rsrc_start + rsrc_size)) {
5411 ids[j] = rsrc_id;
5412 rsrc_id++;
5413 j++;
5414 }
5415 /* Entire word processed. Get next word.*/
5416 if ((i % 2) == 1)
5417 k++;
5418 }
5419 err_exit:
5420 lpfc_sli4_mbox_cmd_free(phba, mbox);
5421 return rc;
5422}
5423
5424/**
5425 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
5426 * @phba: Pointer to HBA context object.
5427 * @type: the extent's type.
5428 *
5429 * This function deallocates all extents of a particular resource type.
5430 * SLI4 does not allow for deallocating a particular extent range. It
5431 * is the caller's responsibility to release all kernel memory resources.
5432 **/
5433static int
5434lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
5435{
5436 int rc;
5437 uint32_t length, mbox_tmo = 0;
5438 LPFC_MBOXQ_t *mbox;
5439 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
5440 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
5441
5442 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5443 if (!mbox)
5444 return -ENOMEM;
5445
5446 /*
5447 * This function sends an embedded mailbox because it only sends
5448 * the resource type. All extents of this type are released by the
5449 * port.
5450 */
5451 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
5452 sizeof(struct lpfc_sli4_cfg_mhdr));
5453 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5454 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
5455 length, LPFC_SLI4_MBX_EMBED);
5456
5457 /* Send an extents count of 0 - the dealloc doesn't use it. */
5458 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5459 LPFC_SLI4_MBX_EMBED);
5460 if (unlikely(rc)) {
5461 rc = -EIO;
5462 goto out_free_mbox;
5463 }
5464 if (!phba->sli4_hba.intr_enable)
5465 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5466 else {
5467 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5468 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5469 }
5470 if (unlikely(rc)) {
5471 rc = -EIO;
5472 goto out_free_mbox;
5473 }
5474
5475 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
5476 if (bf_get(lpfc_mbox_hdr_status,
5477 &dealloc_rsrc->header.cfg_shdr.response)) {
5478 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5479 "2919 Failed to release resource extents "
5480 "for type %d - Status 0x%x Add'l Status 0x%x. "
5481 "Resource memory not released.\n",
5482 type,
5483 bf_get(lpfc_mbox_hdr_status,
5484 &dealloc_rsrc->header.cfg_shdr.response),
5485 bf_get(lpfc_mbox_hdr_add_status,
5486 &dealloc_rsrc->header.cfg_shdr.response));
5487 rc = -EIO;
5488 goto out_free_mbox;
5489 }
5490
5491 /* Release kernel memory resources for the specific type. */
5492 switch (type) {
5493 case LPFC_RSC_TYPE_FCOE_VPI:
5494 kfree(phba->vpi_bmask);
5495 kfree(phba->vpi_ids);
5496 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5497 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5498 &phba->lpfc_vpi_blk_list, list) {
5499 list_del_init(&rsrc_blk->list);
5500 kfree(rsrc_blk);
5501 }
5502 break;
5503 case LPFC_RSC_TYPE_FCOE_XRI:
5504 kfree(phba->sli4_hba.xri_bmask);
5505 kfree(phba->sli4_hba.xri_ids);
5506 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5507 &phba->sli4_hba.lpfc_xri_blk_list, list) {
5508 list_del_init(&rsrc_blk->list);
5509 kfree(rsrc_blk);
5510 }
5511 break;
5512 case LPFC_RSC_TYPE_FCOE_VFI:
5513 kfree(phba->sli4_hba.vfi_bmask);
5514 kfree(phba->sli4_hba.vfi_ids);
5515 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5516 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5517 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
5518 list_del_init(&rsrc_blk->list);
5519 kfree(rsrc_blk);
5520 }
5521 break;
5522 case LPFC_RSC_TYPE_FCOE_RPI:
5523 /* RPI bitmask and physical id array are cleaned up earlier. */
5524 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5525 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
5526 list_del_init(&rsrc_blk->list);
5527 kfree(rsrc_blk);
5528 }
5529 break;
5530 default:
5531 break;
5532 }
5533
5534 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5535
5536 out_free_mbox:
5537 mempool_free(mbox, phba->mbox_mem_pool);
5538 return rc;
5539}
5540
5541/**
5542 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
5543 * @phba: Pointer to HBA context object.
5544 *
5545 * This function allocates all SLI4 resource identifiers.
5546 **/
5547int
5548lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5549{
5550 int i, rc, error = 0;
5551 uint16_t count, base;
5552 unsigned long longs;
5553
5554 if (!phba->sli4_hba.rpi_hdrs_in_use)
5555 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
5556 if (phba->sli4_hba.extents_in_use) {
5557 /*
5558 * The port supports resource extents. The XRI, VPI, VFI, RPI
5559 * resource extent count must be read and allocated before
5560 * provisioning the resource id arrays.
5561 */
5562 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
5563 LPFC_IDX_RSRC_RDY) {
5564 /*
5565 * Extent-based resources are set - the driver could
5566 * be in a port reset. Figure out if any corrective
5567 * actions need to be taken.
5568 */
5569 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5570 LPFC_RSC_TYPE_FCOE_VFI);
5571 if (rc != 0)
5572 error++;
5573 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5574 LPFC_RSC_TYPE_FCOE_VPI);
5575 if (rc != 0)
5576 error++;
5577 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5578 LPFC_RSC_TYPE_FCOE_XRI);
5579 if (rc != 0)
5580 error++;
5581 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5582 LPFC_RSC_TYPE_FCOE_RPI);
5583 if (rc != 0)
5584 error++;
5585
5586 /*
5587 * It's possible that the number of resources
5588 * provided to this port instance changed between
5589 * resets. Detect this condition and reallocate
5590 * resources. Otherwise, there is no action.
5591 */
5592 if (error) {
5593 lpfc_printf_log(phba, KERN_INFO,
5594 LOG_MBOX | LOG_INIT,
5595 "2931 Detected extent resource "
5596 "change. Reallocating all "
5597 "extents.\n");
5598 rc = lpfc_sli4_dealloc_extent(phba,
5599 LPFC_RSC_TYPE_FCOE_VFI);
5600 rc = lpfc_sli4_dealloc_extent(phba,
5601 LPFC_RSC_TYPE_FCOE_VPI);
5602 rc = lpfc_sli4_dealloc_extent(phba,
5603 LPFC_RSC_TYPE_FCOE_XRI);
5604 rc = lpfc_sli4_dealloc_extent(phba,
5605 LPFC_RSC_TYPE_FCOE_RPI);
5606 } else
5607 return 0;
5608 }
5609
5610 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5611 if (unlikely(rc))
5612 goto err_exit;
5613
5614 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5615 if (unlikely(rc))
5616 goto err_exit;
5617
5618 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
5619 if (unlikely(rc))
5620 goto err_exit;
5621
5622 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
5623 if (unlikely(rc))
5624 goto err_exit;
5625 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
5626 LPFC_IDX_RSRC_RDY);
5627 return rc;
5628 } else {
5629 /*
5630 * The port does not support resource extents. The XRI, VPI,
5631 * VFI, RPI resource ids were determined from READ_CONFIG.
5632 * Just allocate the bitmasks and provision the resource id
5633 * arrays. If a port reset is active, the resources don't
5634 * need any action - just exit.
5635 */
5636 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
5637 LPFC_IDX_RSRC_RDY) {
5638 lpfc_sli4_dealloc_resource_identifiers(phba);
5639 lpfc_sli4_remove_rpis(phba);
5640 }
5641 /* RPIs. */
5642 count = phba->sli4_hba.max_cfg_param.max_rpi;
5643 if (count <= 0) {
5644 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5645 "3279 Invalid provisioning of "
5646 "rpi:%d\n", count);
5647 rc = -EINVAL;
5648 goto err_exit;
5649 }
5650 base = phba->sli4_hba.max_cfg_param.rpi_base;
5651 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5652 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5653 sizeof(unsigned long),
5654 GFP_KERNEL);
5655 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5656 rc = -ENOMEM;
5657 goto err_exit;
5658 }
5659 phba->sli4_hba.rpi_ids = kzalloc(count *
5660 sizeof(uint16_t),
5661 GFP_KERNEL);
5662 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5663 rc = -ENOMEM;
5664 goto free_rpi_bmask;
5665 }
5666
5667 for (i = 0; i < count; i++)
5668 phba->sli4_hba.rpi_ids[i] = base + i;
5669
5670 /* VPIs. */
5671 count = phba->sli4_hba.max_cfg_param.max_vpi;
5672 if (count <= 0) {
5673 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5674 "3280 Invalid provisioning of "
5675 "vpi:%d\n", count);
5676 rc = -EINVAL;
5677 goto free_rpi_ids;
5678 }
5679 base = phba->sli4_hba.max_cfg_param.vpi_base;
5680 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5681 phba->vpi_bmask = kzalloc(longs *
5682 sizeof(unsigned long),
5683 GFP_KERNEL);
5684 if (unlikely(!phba->vpi_bmask)) {
5685 rc = -ENOMEM;
5686 goto free_rpi_ids;
5687 }
5688 phba->vpi_ids = kzalloc(count *
5689 sizeof(uint16_t),
5690 GFP_KERNEL);
5691 if (unlikely(!phba->vpi_ids)) {
5692 rc = -ENOMEM;
5693 goto free_vpi_bmask;
5694 }
5695
5696 for (i = 0; i < count; i++)
5697 phba->vpi_ids[i] = base + i;
5698
5699 /* XRIs. */
5700 count = phba->sli4_hba.max_cfg_param.max_xri;
5701 if (count <= 0) {
5702 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5703 "3281 Invalid provisioning of "
5704 "xri:%d\n", count);
5705 rc = -EINVAL;
5706 goto free_vpi_ids;
5707 }
5708 base = phba->sli4_hba.max_cfg_param.xri_base;
5709 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5710 phba->sli4_hba.xri_bmask = kzalloc(longs *
5711 sizeof(unsigned long),
5712 GFP_KERNEL);
5713 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5714 rc = -ENOMEM;
5715 goto free_vpi_ids;
5716 }
5717 phba->sli4_hba.max_cfg_param.xri_used = 0;
5718 phba->sli4_hba.xri_ids = kzalloc(count *
5719 sizeof(uint16_t),
5720 GFP_KERNEL);
5721 if (unlikely(!phba->sli4_hba.xri_ids)) {
5722 rc = -ENOMEM;
5723 goto free_xri_bmask;
5724 }
5725
5726 for (i = 0; i < count; i++)
5727 phba->sli4_hba.xri_ids[i] = base + i;
5728
5729 /* VFIs. */
5730 count = phba->sli4_hba.max_cfg_param.max_vfi;
5731 if (count <= 0) {
5732 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5733 "3282 Invalid provisioning of "
5734 "vfi:%d\n", count);
5735 rc = -EINVAL;
5736 goto free_xri_ids;
5737 }
5738 base = phba->sli4_hba.max_cfg_param.vfi_base;
5739 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5740 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5741 sizeof(unsigned long),
5742 GFP_KERNEL);
5743 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5744 rc = -ENOMEM;
5745 goto free_xri_ids;
5746 }
5747 phba->sli4_hba.vfi_ids = kzalloc(count *
5748 sizeof(uint16_t),
5749 GFP_KERNEL);
5750 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5751 rc = -ENOMEM;
5752 goto free_vfi_bmask;
5753 }
5754
5755 for (i = 0; i < count; i++)
5756 phba->sli4_hba.vfi_ids[i] = base + i;
5757
5758 /*
5759 * Mark all resources ready. An HBA reset doesn't need
5760 * to reset the initialization.
5761 */
5762 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
5763 LPFC_IDX_RSRC_RDY);
5764 return 0;
5765 }
5766
5767 free_vfi_bmask:
5768 kfree(phba->sli4_hba.vfi_bmask);
5769 free_xri_ids:
5770 kfree(phba->sli4_hba.xri_ids);
5771 free_xri_bmask:
5772 kfree(phba->sli4_hba.xri_bmask);
5773 free_vpi_ids:
5774 kfree(phba->vpi_ids);
5775 free_vpi_bmask:
5776 kfree(phba->vpi_bmask);
5777 free_rpi_ids:
5778 kfree(phba->sli4_hba.rpi_ids);
5779 free_rpi_bmask:
5780 kfree(phba->sli4_hba.rpi_bmask);
5781 err_exit:
5782 return rc;
5783}
5784
5785/**
5786 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
5787 * @phba: Pointer to HBA context object.
5788 *
5789 * This function deallocates all previously allocated SLI4 resource
5790 * identifiers, for every resource type.
5791 **/
5792int
5793lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
5794{
5795 if (phba->sli4_hba.extents_in_use) {
5796 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5797 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
5798 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
5799 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5800 } else {
5801 kfree(phba->vpi_bmask);
5802 kfree(phba->vpi_ids);
5803 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5804 kfree(phba->sli4_hba.xri_bmask);
5805 kfree(phba->sli4_hba.xri_ids);
5806 kfree(phba->sli4_hba.vfi_bmask);
5807 kfree(phba->sli4_hba.vfi_ids);
5808 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5809 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5810 }
5811
5812 return 0;
5813}
5814
5815/**
5816 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
5817 * @phba: Pointer to HBA context object.
5818 * @type: The resource extent type.
5819 * @extnt_cnt: buffer to hold port extent count response
5820 * @extnt_size: buffer to hold port extent size response.
5821 *
5822 * This function calls the port to read the host allocated extents
5823 * for a particular type.
5824 **/
5825int
5826lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
5827 uint16_t *extnt_cnt, uint16_t *extnt_size)
5828{
5829 bool emb;
5830 int rc = 0;
5831 uint16_t curr_blks = 0;
5832 uint32_t req_len, emb_len;
5833 uint32_t alloc_len, mbox_tmo;
5834 struct list_head *blk_list_head;
5835 struct lpfc_rsrc_blks *rsrc_blk;
5836 LPFC_MBOXQ_t *mbox;
5837 void *virtaddr = NULL;
5838 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5839 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5840 union lpfc_sli4_cfg_shdr *shdr;
5841
5842 switch (type) {
5843 case LPFC_RSC_TYPE_FCOE_VPI:
5844 blk_list_head = &phba->lpfc_vpi_blk_list;
5845 break;
5846 case LPFC_RSC_TYPE_FCOE_XRI:
5847 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
5848 break;
5849 case LPFC_RSC_TYPE_FCOE_VFI:
5850 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
5851 break;
5852 case LPFC_RSC_TYPE_FCOE_RPI:
5853 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
5854 break;
5855 default:
5856 return -EIO;
5857 }
5858
5859 /* Count the number of extents currently allocated for this type. */
5860 list_for_each_entry(rsrc_blk, blk_list_head, list) {
5861 if (curr_blks == 0) {
5862 /*
5863 * The GET_ALLOCATED mailbox does not return the size,
5864 * just the count. The size should be just the size
5865 * stored in the current allocated block and all sizes
5866 * for an extent type are the same so set the return
5867 * value now.
5868 */
5869 *extnt_size = rsrc_blk->rsrc_size;
5870 }
5871 curr_blks++;
5872 }
5873
5874 /* Calculate the total requested length of the dma memory. */
5875 req_len = curr_blks * sizeof(uint16_t);
5876
5877 /*
5878 * Calculate the size of an embedded mailbox. The uint32_t
5879 * accounts for the extents-specific word.
5880 */
5881 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5882 sizeof(uint32_t);
5883
5884 /*
5885 * Presume the allocation and response will fit into an embedded
5886 * mailbox. If not true, reconfigure to a non-embedded mailbox.
5887 */
5888 emb = LPFC_SLI4_MBX_EMBED;
5889 req_len = emb_len;
5890 if (req_len > emb_len) {
5891 req_len = curr_blks * sizeof(uint16_t) +
5892 sizeof(union lpfc_sli4_cfg_shdr) +
5893 sizeof(uint32_t);
5894 emb = LPFC_SLI4_MBX_NEMBED;
5895 }
5896
5897 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5898 if (!mbox)
5899 return -ENOMEM;
5900 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
5901
5902 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5903 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
5904 req_len, emb);
5905 if (alloc_len < req_len) {
5906 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5907 "2983 Allocated DMA memory size (x%x) is "
5908 "less than the requested DMA memory "
5909 "size (x%x)\n", alloc_len, req_len);
5910 rc = -ENOMEM;
5911 goto err_exit;
5912 }
5913 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
5914 if (unlikely(rc)) {
5915 rc = -EIO;
5916 goto err_exit;
5917 }
5918
5919 if (!phba->sli4_hba.intr_enable)
5920 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5921 else {
5922 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5923 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5924 }
5925
5926 if (unlikely(rc)) {
5927 rc = -EIO;
5928 goto err_exit;
5929 }
5930
5931 /*
5932 * Figure out where the response is located. Then get local pointers
5933 * to the response data. The port does not guarantee to respond to
5934 * all extents counts request so update the local variable with the
5935 * allocated count from the port.
5936 */
5937 if (emb == LPFC_SLI4_MBX_EMBED) {
5938 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5939 shdr = &rsrc_ext->header.cfg_shdr;
5940 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5941 } else {
5942 virtaddr = mbox->sge_array->addr[0];
5943 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5944 shdr = &n_rsrc->cfg_shdr;
5945 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5946 }
5947
5948 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
5949 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5950 "2984 Failed to read allocated resources "
5951 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
5952 type,
5953 bf_get(lpfc_mbox_hdr_status, &shdr->response),
5954 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
5955 rc = -EIO;
5956 goto err_exit;
5957 }
5958 err_exit:
5959 lpfc_sli4_mbox_cmd_free(phba, mbox);
5960 return rc;
5961}
5962
5963/**
5964 * lpfc_sli4_repost_els_sgl_list - Repost the els buffer sgl pages as a block
5965 * @phba: pointer to lpfc hba data structure.
5966 *
5967 * This routine walks the list of els buffers that have been allocated and
5968 * reposts them to the port by using SGL block post. This is needed after a
5969 * pci_function_reset/warm_start or start. It attempts to construct blocks
5970 * of els buffer sgls which contain contiguous xris and uses the non-embedded
5971 * SGL block post mailbox commands to post them to the port. For any single
5972 * els buffer sgl with a non-contiguous xri, it uses the embedded SGL post
5973 * mailbox command for posting.
5974 *
5975 * Returns: 0 = success, non-zero failure.
5976 **/
5977static int
5978lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
5979{
5980 struct lpfc_sglq *sglq_entry = NULL;
5981 struct lpfc_sglq *sglq_entry_next = NULL;
5982 struct lpfc_sglq *sglq_entry_first = NULL;
5983 int status, post_cnt = 0, num_posted = 0, block_cnt = 0;
5984 int last_xritag = NO_XRI;
5985 LIST_HEAD(prep_sgl_list);
5986 LIST_HEAD(blck_sgl_list);
5987 LIST_HEAD(allc_sgl_list);
5988 LIST_HEAD(post_sgl_list);
5989 LIST_HEAD(free_sgl_list);
5990
5991 spin_lock(&phba->hbalock);
5992 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
5993 spin_unlock(&phba->hbalock);
5994
5995 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
5996 &allc_sgl_list, list) {
5997 list_del_init(&sglq_entry->list);
5998 block_cnt++;
5999 if ((last_xritag != NO_XRI) &&
6000 (sglq_entry->sli4_xritag != last_xritag + 1)) {
6001 /* a hole in xri block, form a sgl posting block */
6002 list_splice_init(&prep_sgl_list, &blck_sgl_list);
6003 post_cnt = block_cnt - 1;
6004 /* prepare list for next posting block */
6005 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6006 block_cnt = 1;
6007 } else {
6008 /* prepare list for next posting block */
6009 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6010 /* enough sgls for non-embed sgl mbox command */
6011 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
6012 list_splice_init(&prep_sgl_list,
6013 &blck_sgl_list);
6014 post_cnt = block_cnt;
6015 block_cnt = 0;
6016 }
6017 }
6018 num_posted++;
6019
6020 /* keep track of last sgl's xritag */
6021 last_xritag = sglq_entry->sli4_xritag;
6022
6023 /* end of repost sgl list condition for els buffers */
6024 if (num_posted == phba->sli4_hba.els_xri_cnt) {
6025 if (post_cnt == 0) {
6026 list_splice_init(&prep_sgl_list,
6027 &blck_sgl_list);
6028 post_cnt = block_cnt;
6029 } else if (block_cnt == 1) {
6030 status = lpfc_sli4_post_sgl(phba,
6031 sglq_entry->phys, 0,
6032 sglq_entry->sli4_xritag);
6033 if (!status) {
6034 /* successful, put sgl to posted list */
6035 list_add_tail(&sglq_entry->list,
6036 &post_sgl_list);
6037 } else {
6038 /* Failure, put sgl to free list */
6039 lpfc_printf_log(phba, KERN_WARNING,
6040 LOG_SLI,
6041 "3159 Failed to post els "
6042 "sgl, xritag:x%x\n",
6043 sglq_entry->sli4_xritag);
6044 list_add_tail(&sglq_entry->list,
6045 &free_sgl_list);
6046 spin_lock_irq(&phba->hbalock);
6047 phba->sli4_hba.els_xri_cnt--;
6048 spin_unlock_irq(&phba->hbalock);
6049 }
6050 }
6051 }
6052
6053 /* continue until a nembed page worth of sgls */
6054 if (post_cnt == 0)
6055 continue;
6056
6057 /* post the els buffer list sgls as a block */
6058 status = lpfc_sli4_post_els_sgl_list(phba, &blck_sgl_list,
6059 post_cnt);
6060
6061 if (!status) {
6062 /* success, put sgl list to posted sgl list */
6063 list_splice_init(&blck_sgl_list, &post_sgl_list);
6064 } else {
6065 /* Failure, put sgl list to free sgl list */
6066 sglq_entry_first = list_first_entry(&blck_sgl_list,
6067 struct lpfc_sglq,
6068 list);
6069 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6070 "3160 Failed to post els sgl-list, "
6071 "xritag:x%x-x%x\n",
6072 sglq_entry_first->sli4_xritag,
6073 (sglq_entry_first->sli4_xritag +
6074 post_cnt - 1));
6075 list_splice_init(&blck_sgl_list, &free_sgl_list);
6076 spin_lock_irq(&phba->hbalock);
6077 phba->sli4_hba.els_xri_cnt -= post_cnt;
6078 spin_unlock_irq(&phba->hbalock);
6079 }
6080
6081 /* don't reset xritag due to hole in xri block */
6082 if (block_cnt == 0)
6083 last_xritag = NO_XRI;
6084
6085 /* reset els sgl post count for next round of posting */
6086 post_cnt = 0;
6087 }
6088
6089 /* free the els sgls failed to post */
6090 lpfc_free_sgl_list(phba, &free_sgl_list);
6091
6092 /* push els sgls posted to the available list */
6093 if (!list_empty(&post_sgl_list)) {
6094 spin_lock(&phba->hbalock);
6095 list_splice_init(&post_sgl_list,
6096 &phba->sli4_hba.lpfc_sgl_list);
6097 spin_unlock(&phba->hbalock);
6098 } else {
6099 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6100 "3161 Failure to post els sgl to port.\n");
6101 return -EIO;
6102 }
6103 return 0;
6104}
6105
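/*
 * Illustrative walk-through of the xri-grouping rule above (assumed example
 * values, not driver code): given allocated xritags 10, 11, 12, 20, 21, the
 * first three are contiguous and are posted together with one non-embedded
 * SGL block post; the hole at 13..19 closes that block, and 20, 21 start a
 * new one. Only a block that ends up containing a single sgl falls back to
 * the embedded single-sgl post via lpfc_sli4_post_sgl().
 */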
6106/**
6107 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
6108 * @phba: Pointer to HBA context object.
6109 *
6110 * This function is the main SLI4 device initialization PCI function. This
6111 * function is called by the HBA initialization code, HBA reset code and
6112 * HBA error attention handler code. Caller is not required to hold any
6113 * locks.
6114 **/
6115int
6116lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6117{
6118 int rc;
6119 LPFC_MBOXQ_t *mboxq;
6120 struct lpfc_mqe *mqe;
6121 uint8_t *vpd;
6122 uint32_t vpd_size;
6123 uint32_t ftr_rsp = 0;
6124 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
6125 struct lpfc_vport *vport = phba->pport;
6126 struct lpfc_dmabuf *mp;
6127
6128 /* Perform a PCI function reset to start from clean */
6129 rc = lpfc_pci_function_reset(phba);
6130 if (unlikely(rc))
6131 return -ENODEV;
6132
6133 /* Check the HBA Host Status Register for readiness */
6134 rc = lpfc_sli4_post_status_check(phba);
6135 if (unlikely(rc))
6136 return -ENODEV;
6137 else {
6138 spin_lock_irq(&phba->hbalock);
6139 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
6140 spin_unlock_irq(&phba->hbalock);
6141 }
6142
6143 /*
6144 * Allocate a single mailbox container for initializing the
6145 * port.
6146 */
6147 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6148 if (!mboxq)
6149 return -ENOMEM;
6150
6151 /* Issue READ_REV to collect vpd and FW information. */
6152 vpd_size = SLI4_PAGE_SIZE;
6153 vpd = kzalloc(vpd_size, GFP_KERNEL);
6154 if (!vpd) {
6155 rc = -ENOMEM;
6156 goto out_free_mbox;
6157 }
6158
6159 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
6160 if (unlikely(rc)) {
6161 kfree(vpd);
6162 goto out_free_mbox;
6163 }
6164 mqe = &mboxq->u.mqe;
6165 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
6166 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
6167 phba->hba_flag |= HBA_FCOE_MODE;
6168 else
6169 phba->hba_flag &= ~HBA_FCOE_MODE;
6170
6171 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
6172 LPFC_DCBX_CEE_MODE)
6173 phba->hba_flag |= HBA_FIP_SUPPORT;
6174 else
6175 phba->hba_flag &= ~HBA_FIP_SUPPORT;
6176
6177 phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
6178
6179 if (phba->sli_rev != LPFC_SLI_REV4) {
6180 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6181 "0376 READ_REV Error. SLI Level %d "
6182 "FCoE enabled %d\n",
6183 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
6184 rc = -EIO;
6185 kfree(vpd);
6186 goto out_free_mbox;
6187 }
6188
6189 /*
6190 * Continue initialization with default values even if the driver failed
6191 * to read the FCoE param config regions; only read the parameters if
6192 * the board is FCoE capable.
6193 */
6194 if (phba->hba_flag & HBA_FCOE_MODE &&
6195 lpfc_sli4_read_fcoe_params(phba))
6196 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
6197 "2570 Failed to read FCoE parameters\n");
6198
6199 /*
6200 * Retrieve the sli4 device physical port name; a failure to do so
6201 * is considered non-fatal.
6202 */
6203 rc = lpfc_sli4_retrieve_pport_name(phba);
6204 if (!rc)
6205 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6206 "3080 Successful retrieving SLI4 device "
6207 "physical port name: %s.\n", phba->Port);
6208
6209 /*
6210 * Evaluate the read rev and vpd data. Populate the driver
6211 * state with the results. If this routine fails, the failure
6212 * is not fatal as the driver will use generic values.
6213 */
6214 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
6215 if (unlikely(!rc)) {
6216 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6217 "0377 Error %d parsing vpd. "
6218 "Using defaults.\n", rc);
6219 rc = 0;
6220 }
6221 kfree(vpd);
6222
6223 /* Save information as VPD data */
6224 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
6225 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
6226 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
6227 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
6228 &mqe->un.read_rev);
6229 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
6230 &mqe->un.read_rev);
6231 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
6232 &mqe->un.read_rev);
6233 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
6234 &mqe->un.read_rev);
6235 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
6236 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
6237 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
6238 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
6239 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
6240 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
6241 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6242 "(%d):0380 READ_REV Status x%x "
6243 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
6244 mboxq->vport ? mboxq->vport->vpi : 0,
6245 bf_get(lpfc_mqe_status, mqe),
6246 phba->vpd.rev.opFwName,
6247 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
6248 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
6249
6250 /*
6251 * Discover the port's supported feature set and match it against the
6252 * host's requests.
6253 */
6254 lpfc_request_features(phba, mboxq);
6255 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6256 if (unlikely(rc)) {
6257 rc = -EIO;
6258 goto out_free_mbox;
6259 }
6260
6261 /*
6262 * The port must support FCP initiator mode as this is the
6263 * only mode running in the host.
6264 */
6265 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
6266 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6267 "0378 No support for fcpi mode.\n");
6268 ftr_rsp++;
6269 }
6270 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
6271 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
6272 else
6273 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
6274 /*
6275 * If the port cannot support the host's requested features
6276 * then turn off the global config parameters to disable the
6277 * feature in the driver. This is not a fatal error.
6278 */
6279 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
6280 if (phba->cfg_enable_bg) {
6281 if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))
6282 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
6283 else
6284 ftr_rsp++;
6285 }
6286
6287 if (phba->max_vpi && phba->cfg_enable_npiv &&
6288 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6289 ftr_rsp++;
6290
6291 if (ftr_rsp) {
6292 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6293 "0379 Feature Mismatch Data: x%08x %08x "
6294 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
6295 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
6296 phba->cfg_enable_npiv, phba->max_vpi);
6297 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
6298 phba->cfg_enable_bg = 0;
6299 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6300 phba->cfg_enable_npiv = 0;
6301 }
6302
6303 /* These SLI3 features are assumed in SLI4 */
6304 spin_lock_irq(&phba->hbalock);
6305 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
6306 spin_unlock_irq(&phba->hbalock);
6307
6308 /*
6309 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent
6310 * calls depend on these resources to complete port setup.
6311 */
6312 rc = lpfc_sli4_alloc_resource_identifiers(phba);
6313 if (rc) {
6314 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6315 "2920 Failed to alloc Resource IDs "
6316 "rc = x%x\n", rc);
6317 goto out_free_mbox;
6318 }
6319
6320 /* Read the port's service parameters. */
6321 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
6322 if (rc) {
6323 phba->link_state = LPFC_HBA_ERROR;
6324 rc = -ENOMEM;
6325 goto out_free_mbox;
6326 }
6327
6328 mboxq->vport = vport;
6329 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6330 mp = (struct lpfc_dmabuf *) mboxq->context1;
6331 if (rc == MBX_SUCCESS) {
6332 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
6333 rc = 0;
6334 }
6335
6336 /*
6337 * This memory was allocated by the lpfc_read_sparam routine. Release
6338 * it to the mbuf pool.
6339 */
6340 lpfc_mbuf_free(phba, mp->virt, mp->phys);
6341 kfree(mp);
6342 mboxq->context1 = NULL;
6343 if (unlikely(rc)) {
6344 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6345 "0382 READ_SPARAM command failed "
6346 "status %d, mbxStatus x%x\n",
6347 rc, bf_get(lpfc_mqe_status, mqe));
6348 phba->link_state = LPFC_HBA_ERROR;
6349 rc = -EIO;
6350 goto out_free_mbox;
6351 }
6352
6353 lpfc_update_vport_wwn(vport);
6354
6355 /* Update the fc_host data structures with new wwn. */
6356 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
6357 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
6358
6359 /* update host els and scsi xri-sgl sizes and mappings */
6360 rc = lpfc_sli4_xri_sgl_update(phba);
6361 if (unlikely(rc)) {
6362 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6363 "1400 Failed to update xri-sgl size and "
6364 "mapping: %d\n", rc);
6365 goto out_free_mbox;
6366 }
6367
6368 /* register the els sgl pool to the port */
6369 rc = lpfc_sli4_repost_els_sgl_list(phba);
6370 if (unlikely(rc)) {
6371 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6372 "0582 Error %d during els sgl post "
6373 "operation\n", rc);
6374 rc = -ENODEV;
6375 goto out_free_mbox;
6376 }
6377
6378 /* register the allocated scsi sgl pool to the port */
6379 rc = lpfc_sli4_repost_scsi_sgl_list(phba);
6380 if (unlikely(rc)) {
6381 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6382 "0383 Error %d during scsi sgl post "
6383 "operation\n", rc);
6384 /* Some Scsi buffers were moved to the abort scsi list */
6385 /* A pci function reset will repost them */
6386 rc = -ENODEV;
6387 goto out_free_mbox;
6388 }
6389
6390 /* Post the rpi header region to the device. */
6391 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
6392 if (unlikely(rc)) {
6393 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6394 "0393 Error %d during rpi post operation\n",
6395 rc);
6396 rc = -ENODEV;
6397 goto out_free_mbox;
6398 }
6399 lpfc_sli4_node_prep(phba);
6400
6401 /* Create all the SLI4 queues */
6402 rc = lpfc_sli4_queue_create(phba);
6403 if (rc) {
6404 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6405 "3089 Failed to allocate queues\n");
6406 rc = -ENODEV;
6407 goto out_stop_timers;
6408 }
6409 /* Set up all the queues to the device */
6410 rc = lpfc_sli4_queue_setup(phba);
6411 if (unlikely(rc)) {
6412 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6413 "0381 Error %d during queue setup.\n ", rc);
6414 goto out_destroy_queue;
6415 }
6416
6417 /* Arm the CQs and then EQs on device */
6418 lpfc_sli4_arm_cqeq_intr(phba);
6419
6420 /* Indicate device interrupt mode */
6421 phba->sli4_hba.intr_enable = 1;
6422
6423 /* Allow asynchronous mailbox command to go through */
6424 spin_lock_irq(&phba->hbalock);
6425 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
6426 spin_unlock_irq(&phba->hbalock);
6427
6428 /* Post receive buffers to the device */
6429 lpfc_sli4_rb_setup(phba);
6430
6431 /* Reset HBA FCF states after HBA reset */
6432 phba->fcf.fcf_flag = 0;
6433 phba->fcf.current_rec.flag = 0;
6434
6435 /* Start the ELS watchdog timer */
6436 mod_timer(&vport->els_tmofunc,
6437 jiffies + HZ * (phba->fc_ratov * 2));
6438
6439 /* Start heart beat timer */
6440 mod_timer(&phba->hb_tmofunc,
6441 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
6442 phba->hb_outstanding = 0;
6443 phba->last_completion_time = jiffies;
6444
6445 /* Start error attention (ERATT) polling timer */
6446 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
6447
6448 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
6449 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
6450 rc = pci_enable_pcie_error_reporting(phba->pcidev);
6451 if (!rc) {
6452 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6453 "2829 This device supports "
6454 "Advanced Error Reporting (AER)\n");
6455 spin_lock_irq(&phba->hbalock);
6456 phba->hba_flag |= HBA_AER_ENABLED;
6457 spin_unlock_irq(&phba->hbalock);
6458 } else {
6459 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6460 "2830 This device does not support "
6461 "Advanced Error Reporting (AER)\n");
6462 phba->cfg_aer_support = 0;
6463 }
6464 rc = 0;
6465 }
6466
76a95d75
JS
6467 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
6468 /*
6469 * The FC Port needs to register FCFI (index 0)
6470 */
6471 lpfc_reg_fcfi(phba, mboxq);
6472 mboxq->vport = phba->pport;
6473 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9589b062 6474 if (rc != MBX_SUCCESS)
76a95d75 6475 goto out_unset_queue;
9589b062
JS
6476 rc = 0;
6477 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
6478 &mboxq->u.mqe.un.reg_fcfi);
026abb87
JS
6479
6480 /* Check if the port is configured to be disabled */
6481 lpfc_sli_read_link_ste(phba);
76a95d75 6482 }
026abb87 6483
da0436e9
JS
6484 /*
6485 * The port is ready, set the host's link state to LINK_DOWN
6486 * in preparation for link interrupts.
6487 */
da0436e9
JS
6488 spin_lock_irq(&phba->hbalock);
6489 phba->link_state = LPFC_LINK_DOWN;
6490 spin_unlock_irq(&phba->hbalock);
026abb87
JS
6491 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
6492 (phba->hba_flag & LINK_DISABLED)) {
6493 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
6494 "3103 Adapter Link is disabled.\n");
6495 lpfc_down_link(phba, mboxq);
6496 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6497 if (rc != MBX_SUCCESS) {
6498 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
6499 "3104 Adapter failed to issue "
6500 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
6501 goto out_unset_queue;
6502 }
6503 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
1b51197d
JS
6504 /* don't perform init_link on SLI4 FC port loopback test */
6505 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
6506 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
6507 if (rc)
6508 goto out_unset_queue;
6509 }
5350d872
JS
6510 }
6511 mempool_free(mboxq, phba->mbox_mem_pool);
6512 return rc;
76a95d75 6513out_unset_queue:
da0436e9 6514 /* Unset all the queues set up in this routine when error out */
5350d872
JS
6515 lpfc_sli4_queue_unset(phba);
6516out_destroy_queue:
6517 lpfc_sli4_queue_destroy(phba);
da0436e9 6518out_stop_timers:
5350d872 6519 lpfc_stop_hba_timers(phba);
da0436e9
JS
6520out_free_mbox:
6521 mempool_free(mboxq, phba->mbox_mem_pool);
6522 return rc;
6523}

/**
 * lpfc_mbox_timeout - Timeout call back function for mbox timer
 * @ptr: context object - pointer to hba structure.
 *
 * This is the callback function for the mailbox timer. The mailbox
 * timer is armed when a new mailbox command is issued and the timer
 * is deleted when the mailbox completes. The function is called by
 * the kernel timer code when a mailbox does not complete within the
 * expected time. This function wakes up the worker thread to
 * process the mailbox timeout and returns. All the processing is
 * done by the worker thread function lpfc_mbox_timeout_handler.
 **/
void
lpfc_mbox_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
	unsigned long iflag;
	uint32_t tmo_posted;

	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
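
/*
 * Illustrative sketch (compiled out): how a timer such as mbox_tmo is
 * typically wired to lpfc_mbox_timeout() with the pre-4.15 kernel timer
 * API this file is written against.  The driver's real init site lives
 * elsewhere (lpfc_init.c); the helper below and the 30-second value are
 * assumptions for illustration only.
 */
#if 0
static void lpfc_example_arm_mbox_timer(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	init_timer(&psli->mbox_tmo);
	psli->mbox_tmo.function = lpfc_mbox_timeout;
	psli->mbox_tmo.data = (unsigned long)phba;
	/* re-armed per command, as done later in this file */
	mod_timer(&psli->mbox_tmo, jiffies + (HZ * 30 /* assumed tmo */));
}
#endif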


/**
 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
 * @phba: Pointer to HBA context object.
 *
 * This function is called from the worker thread when a mailbox command times
 * out. The caller is not required to hold any locks. This function will reset
 * the HBA and recover all the pending commands.
 **/
void
lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
	MAILBOX_t *mb = &pmbox->u.mb;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	/* Check the pmbox pointer first.  There is a race condition
	 * between the mbox timeout handler getting executed in the
	 * worklist and the mailbox actually completing. When this
	 * race condition occurs, the mbox_active will be NULL.
	 */
	spin_lock_irq(&phba->hbalock);
	if (pmbox == NULL) {
		lpfc_printf_log(phba, KERN_WARNING,
				LOG_MBOX | LOG_SLI,
				"0353 Active Mailbox cleared - mailbox timeout "
				"exiting\n");
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* Mbox cmd <mbxCommand> timeout */
	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
			mb->mbxCommand,
			phba->pport->port_state,
			phba->sli.sli_flag,
			phba->sli.mbox_active);
	spin_unlock_irq(&phba->hbalock);

	/* Setting state unknown so lpfc_sli_abort_iocb_ring
	 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
	 * it to fail all outstanding SCSI IO.
	 */
	spin_lock_irq(&phba->pport->work_port_lock);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irq(&phba->pport->work_port_lock);
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_LINK_UNKNOWN;
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0345 Resetting board due to mailbox timeout\n");

	/* Reset the HBA device */
	lpfc_reset_hba(phba);
}

/**
 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox needs to be processed.
 *
 * This function is called by discovery code and HBA management code
 * to submit a mailbox command to firmware with SLI-3 interface spec. This
 * function gets the hbalock to protect the data structures.
 * The mailbox command can be submitted in polling mode, in which case
 * this function will wait in a polling loop for the completion of the
 * mailbox.
 * If the mailbox is submitted in no_wait mode (not polling) the
 * function will submit the command and return immediately without waiting
 * for the mailbox completion. The no_wait is supported only when HBA
 * is in SLI2/SLI3 mode - interrupts are enabled.
 * The SLI interface allows only one mailbox pending at a time. If the
 * mailbox is issued in polling mode and there is already a mailbox
 * pending, then the function will return an error. If the mailbox is issued
 * in NO_WAIT mode and there is a mailbox pending already, the function
 * will return MBX_BUSY after queuing the mailbox into the mailbox queue.
 * The sli layer owns the mailbox object until the completion of the mailbox
 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
 * return codes the caller owns the mailbox command after the return of
 * the function.
 **/
static int
lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
		       uint32_t flag)
{
	MAILBOX_t *mbx;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, evtctr;
	uint32_t ha_copy, hc_copy;
	int i;
	unsigned long timeout;
	unsigned long drvr_flag = 0;
	uint32_t word0, ldata;
	void __iomem *to_slim;
	int processing_queue = 0;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	if (!pmbox) {
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		/* processing mbox queue from intr_handler */
		if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			return MBX_SUCCESS;
		}
		processing_queue = 1;
		pmbox = lpfc_mbox_get(phba);
		if (!pmbox) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			return MBX_SUCCESS;
		}
	}

	if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
	    pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
		if (!pmbox->vport) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_VPORT,
					"1806 Mbox x%x failed. No vport\n",
					pmbox->u.mb.mbxCommand);
			dump_stack();
			goto out_not_finished;
		}
	}

	/* If the PCI channel is in offline state, do not post mbox. */
	if (unlikely(pci_channel_offline(phba->pcidev))) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
		goto out_not_finished;
	}

	/* If HBA has a deferred error attention, fail the iocb. */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
		goto out_not_finished;
	}

	psli = &phba->sli;

	mbx = &pmbox->u.mb;
	status = MBX_SUCCESS;

	if (phba->link_state == LPFC_HBA_ERROR) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

		/* Mbox command <mbxCommand> cannot issue */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):0311 Mailbox command x%x cannot "
				"issue Data: x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0,
				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
		goto out_not_finished;
	}

	if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
		if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
		    !(hc_copy & HC_MBINT_ENA)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2528 Mailbox command x%x cannot "
				"issue Data: x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0,
				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
			goto out_not_finished;
		}
	}

	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		/* Polling for a mbox command when another one is already active
		 * is not allowed in SLI. Also, the driver must have established
		 * SLI2 mode to queue and process multiple mbox commands.
		 */

		if (flag & MBX_POLL) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2529 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}

		if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2530 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}

		/* Another mailbox command is still being processed, queue this
		 * command to be processed later.
		 */
		lpfc_mbox_put(phba, pmbox);

		/* Mbox cmd issue - BUSY */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"(%d):0308 Mbox cmd issue - BUSY Data: "
				"x%x x%x x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0xffffff,
				mbx->mbxCommand, phba->pport->port_state,
				psli->sli_flag, flag);

		psli->slistat.mbox_busy++;
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

		if (pmbox->vport) {
			lpfc_debugfs_disc_trc(pmbox->vport,
				LPFC_DISC_TRC_MBOX_VPORT,
				"MBOX Bsy vport: cmd:x%x mb:x%x x%x",
				(uint32_t)mbx->mbxCommand,
				mbx->un.varWords[0], mbx->un.varWords[1]);
		}
		else {
			lpfc_debugfs_disc_trc(phba->pport,
				LPFC_DISC_TRC_MBOX,
				"MBOX Bsy: cmd:x%x mb:x%x x%x",
				(uint32_t)mbx->mbxCommand,
				mbx->un.varWords[0], mbx->un.varWords[1]);
		}

		return MBX_BUSY;
	}

	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* If we are not polling, we MUST be in SLI2 mode */
	if (flag != MBX_POLL) {
		if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
		    (mbx->mbxCommand != MBX_KILL_BOARD)) {
			psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2531 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}
		/* timeout active mbox command */
		mod_timer(&psli->mbox_tmo, (jiffies +
			  (HZ * lpfc_mbox_tmo_val(phba, pmbox))));
	}

	/* Mailbox cmd <cmd> issue */
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
			"x%x\n",
			pmbox->vport ? pmbox->vport->vpi : 0,
			mbx->mbxCommand, phba->pport->port_state,
			psli->sli_flag, flag);

	if (mbx->mbxCommand != MBX_HEARTBEAT) {
		if (pmbox->vport) {
			lpfc_debugfs_disc_trc(pmbox->vport,
				LPFC_DISC_TRC_MBOX_VPORT,
				"MBOX Send vport: cmd:x%x mb:x%x x%x",
				(uint32_t)mbx->mbxCommand,
				mbx->un.varWords[0], mbx->un.varWords[1]);
		}
		else {
			lpfc_debugfs_disc_trc(phba->pport,
				LPFC_DISC_TRC_MBOX,
				"MBOX Send: cmd:x%x mb:x%x x%x",
				(uint32_t)mbx->mbxCommand,
				mbx->un.varWords[0], mbx->un.varWords[1]);
		}
	}

	psli->slistat.mbox_cmd++;
	evtctr = psli->slistat.mbox_event;

	/* next set own bit for the adapter and copy over command word */
	mbx->mbxOwner = OWN_CHIP;

	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
		/* Populate mbox extension offset word. */
		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
				= (uint8_t *)phba->mbox_ext
				  - (uint8_t *)phba->mbox;
		}

		/* Copy the mailbox extension data */
		if (pmbox->in_ext_byte_len && pmbox->context2) {
			lpfc_sli_pcimem_bcopy(pmbox->context2,
				(uint8_t *)phba->mbox_ext,
				pmbox->in_ext_byte_len);
		}
		/* Copy command data to host SLIM area */
		lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
	} else {
		/* Populate mbox extension offset word. */
		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
				= MAILBOX_HBA_EXT_OFFSET;

		/* Copy the mailbox extension data */
		if (pmbox->in_ext_byte_len && pmbox->context2) {
			lpfc_memcpy_to_slim(phba->MBslimaddr +
				MAILBOX_HBA_EXT_OFFSET,
				pmbox->context2, pmbox->in_ext_byte_len);

		}
		if (mbx->mbxCommand == MBX_CONFIG_PORT) {
			/* copy command data into host mbox for cmpl */
			lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
		}

		/* First copy mbox command data to HBA SLIM, skip past first
		   word */
		to_slim = phba->MBslimaddr + sizeof (uint32_t);
		lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
			    MAILBOX_CMD_SIZE - sizeof (uint32_t));

		/* Next copy over first word, with mbxOwner set */
		ldata = *((uint32_t *)mbx);
		to_slim = phba->MBslimaddr;
		writel(ldata, to_slim);
		readl(to_slim); /* flush */

		if (mbx->mbxCommand == MBX_CONFIG_PORT) {
			/* switch over to host mailbox */
			psli->sli_flag |= LPFC_SLI_ACTIVE;
		}
	}

	wmb();

	switch (flag) {
	case MBX_NOWAIT:
		/* Set up reference to mailbox command */
		psli->mbox_active = pmbox;
		/* Interrupt board to do it */
		writel(CA_MBATT, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
		/* Don't wait for it to finish, just return */
		break;

	case MBX_POLL:
		/* Set up null reference to mailbox command */
		psli->mbox_active = NULL;
		/* Interrupt board to do it */
		writel(CA_MBATT, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */

		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
			/* First read mbox status word */
			word0 = *((uint32_t *)phba->mbox);
			word0 = le32_to_cpu(word0);
		} else {
			/* First read mbox status word */
			if (lpfc_readl(phba->MBslimaddr, &word0)) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}
		}

		/* Read the HBA Host Attention Register */
		if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
			spin_unlock_irqrestore(&phba->hbalock,
					       drvr_flag);
			goto out_not_finished;
		}
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
					   1000) + jiffies;
		i = 0;
		/* Wait for command to complete */
		while (((word0 & OWN_CHIP) == OWN_CHIP) ||
		       (!(ha_copy & HA_MBATT) &&
			(phba->link_state > LPFC_WARM_START))) {
			if (time_after(jiffies, timeout)) {
				psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}

			/* Check if we took a mbox interrupt while we were
			   polling */
			if (((word0 & OWN_CHIP) != OWN_CHIP)
			    && (evtctr != psli->slistat.mbox_event))
				break;

			if (i++ > 10) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				msleep(1);
				spin_lock_irqsave(&phba->hbalock, drvr_flag);
			}

			if (psli->sli_flag & LPFC_SLI_ACTIVE) {
				/* First copy command data */
				word0 = *((uint32_t *)phba->mbox);
				word0 = le32_to_cpu(word0);
				if (mbx->mbxCommand == MBX_CONFIG_PORT) {
					MAILBOX_t *slimmb;
					uint32_t slimword0;
					/* Check real SLIM for any errors */
					slimword0 = readl(phba->MBslimaddr);
					slimmb = (MAILBOX_t *) &slimword0;
					if (((slimword0 & OWN_CHIP) != OWN_CHIP)
					    && slimmb->mbxStatus) {
						psli->sli_flag &=
						    ~LPFC_SLI_ACTIVE;
						word0 = slimword0;
					}
				}
			} else {
				/* First copy command data */
				word0 = readl(phba->MBslimaddr);
			}
			/* Read the HBA Host Attention Register */
			if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}
		}

		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
			/* copy results back to user */
			lpfc_sli_pcimem_bcopy(phba->mbox, mbx, MAILBOX_CMD_SIZE);
			/* Copy the mailbox extension data */
			if (pmbox->out_ext_byte_len && pmbox->context2) {
				lpfc_sli_pcimem_bcopy(phba->mbox_ext,
						      pmbox->context2,
						      pmbox->out_ext_byte_len);
			}
		} else {
			/* First copy command data */
			lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
					      MAILBOX_CMD_SIZE);
			/* Copy the mailbox extension data */
			if (pmbox->out_ext_byte_len && pmbox->context2) {
				lpfc_memcpy_from_slim(pmbox->context2,
					phba->MBslimaddr +
					MAILBOX_HBA_EXT_OFFSET,
					pmbox->out_ext_byte_len);
			}
		}

		writel(HA_MBATT, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */

		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		status = mbx->mbxStatus;
	}

	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return status;

out_not_finished:
	if (processing_queue) {
		pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
		lpfc_mbox_cmpl_put(phba, pmbox);
	}
	return MBX_NOT_FINISHED;
}
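
/*
 * Illustrative caller sketch (compiled out), following the ownership
 * rule documented above: on MBX_BUSY or MBX_SUCCESS the SLI layer owns
 * the mailbox until completion, so the caller frees it only on other
 * return codes.  lpfc_sli_def_mbox_cmpl is the driver's default
 * completion handler; the surrounding helper and its error policy are
 * assumptions for illustration.
 */
#if 0
static int lpfc_example_issue_mbox_nowait(struct lpfc_hba *phba,
					  LPFC_MBOXQ_t *pmb)
{
	int rc;

	pmb->vport = phba->pport;
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc != MBX_BUSY && rc != MBX_SUCCESS) {
		/* caller owns the mailbox again; release it */
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}
#endif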

/**
 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox commands
 * @phba: Pointer to HBA context object.
 *
 * The function blocks the posting of SLI4 asynchronous mailbox commands from
 * the driver internal pending mailbox queue. It will then try to wait out any
 * possible outstanding mailbox command before returning.
 *
 * Returns:
 * 0 - the outstanding mailbox command completed; otherwise, the wait for
 * the outstanding mailbox command timed out.
 **/
static int
lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	int rc = 0;
	unsigned long timeout = 0;

	/* Mark the asynchronous mailbox command posting as blocked */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	/* Determine how long we might wait for the active mailbox
	 * command to be gracefully completed by firmware.
	 */
	if (phba->sli.mbox_active)
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
						phba->sli.mbox_active) *
						1000) + jiffies;
	spin_unlock_irq(&phba->hbalock);

	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			/* Timeout, mark the outstanding cmd not complete */
			rc = 1;
			break;
		}
	}

	/* Can not cleanly block async mailbox command, fail it */
	if (rc) {
		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
		spin_unlock_irq(&phba->hbalock);
	}
	return rc;
}

/**
 * lpfc_sli4_async_mbox_unblock - Unblock posting of SLI4 async mailbox commands
 * @phba: Pointer to HBA context object.
 *
 * The function unblocks and resumes the posting of SLI4 asynchronous mailbox
 * commands from the driver internal pending mailbox queue. It makes sure
 * that there is no outstanding mailbox command before resuming posting
 * asynchronous mailbox commands. If, for any reason, there is an outstanding
 * mailbox command, it will try to wait it out before resuming asynchronous
 * mailbox command posting.
 **/
static void
lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
		/* Asynchronous mailbox posting is not blocked, do nothing */
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* The outstanding synchronous mailbox command is guaranteed to be
	 * done by now (successful or timed out); after timing out, the
	 * outstanding command is always removed, so just unblock posting
	 * async mailbox commands and resume.
	 */
	psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);

	/* wake up worker thread to post asynchronous mailbox command */
	lpfc_worker_wake_up(phba);
}
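
/*
 * Illustrative pairing sketch: the intended user of the block/unblock
 * pair is the MBX_POLL branch of lpfc_sli_issue_mbox_s4() below --
 * block async posting, post the synchronous mailbox, then unblock:
 *
 *	if (!lpfc_sli4_async_mbox_block(phba)) {
 *		rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
 *		lpfc_sli4_async_mbox_unblock(phba);
 *	}
 */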

/**
 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 *
 * The function waits for the bootstrap mailbox register ready bit from
 * the port for twice the regular mailbox command timeout value.
 *
 * Returns:
 * 0 - no timeout on waiting for bootstrap mailbox register ready.
 * MBXERR_ERROR - wait for bootstrap mailbox register timed out.
 **/
static int
lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	uint32_t db_ready;
	unsigned long timeout;
	struct lpfc_register bmbx_reg;

	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
				   * 1000) + jiffies;

	do {
		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
		db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
		if (!db_ready)
			msleep(2);

		if (time_after(jiffies, timeout))
			return MBXERR_ERROR;
	} while (!db_ready);

	return 0;
}
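
/*
 * Worked timing example (assuming lpfc_mbox_tmo_val() yields a common
 * 30-second mailbox timeout for the command in question): the loop
 * above samples the ready bit roughly every 2 ms and returns
 * MBXERR_ERROR once 30 s worth of jiffies elapse without the bit set.
 */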

/**
 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 *
 * The function posts a mailbox to the port.  The mailbox is expected
 * to be completely filled in and ready for the port to operate on it.
 * This routine executes a synchronous completion operation on the
 * mailbox by polling for its completion.
 *
 * The caller must not be holding any locks when calling this routine.
 *
 * Returns:
 *	MBX_SUCCESS - mailbox posted successfully
 *	Any of the MBX error values.
 **/
static int
lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc = MBX_SUCCESS;
	unsigned long iflag;
	uint32_t mcqe_status;
	uint32_t mbx_cmnd;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_mqe *mb = &mboxq->u.mqe;
	struct lpfc_bmbx_create *mbox_rgn;
	struct dma_address *dma_address;

	/*
	 * Only one mailbox can be active to the bootstrap mailbox region
	 * at a time and there is no queueing provided.
	 */
	spin_lock_irqsave(&phba->hbalock, iflag);
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2532 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, MBX_POLL);
		return MBXERR_ERROR;
	}
	/* The server grabs the token and owns it until release */
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = mboxq;
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* wait for bootstrap mbox register readiness */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;

	/*
	 * Initialize the bootstrap memory region to avoid stale data areas
	 * in the mailbox post.  Then copy the caller's mailbox contents to
	 * the bmbx mailbox region.
	 */
	mbx_cmnd = bf_get(lpfc_mqe_command, mb);
	memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
	lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
			      sizeof(struct lpfc_mqe));

	/* Post the high mailbox dma address to the port and wait for ready. */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);

	/* wait for bootstrap mbox register hi-address write done */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;

	/* Post the low mailbox dma address to the port. */
	writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);

	/* wait for bootstrap mbox register low address write done */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;

	/*
	 * Read the CQ to ensure the mailbox has completed.
	 * If so, update the mailbox status so that the upper layers
	 * can complete the request normally.
	 */
	lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
			      sizeof(struct lpfc_mqe));
	mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
	lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
			      sizeof(struct lpfc_mcqe));
	mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
	/*
	 * When the CQE status indicates a failure and the mailbox status
	 * indicates success then copy the CQE status into the mailbox status
	 * (and prefix it with x4000).
	 */
	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
		if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
			bf_set(lpfc_mqe_status, mb,
			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
		rc = MBXERR_ERROR;
	} else
		lpfc_sli4_swap_str(phba, mboxq);

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
			" x%x x%x CQ: x%x x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			bf_get(lpfc_mqe_status, mb),
			mb->un.mb_words[0], mb->un.mb_words[1],
			mb->un.mb_words[2], mb->un.mb_words[3],
			mb->un.mb_words[4], mb->un.mb_words[5],
			mb->un.mb_words[6], mb->un.mb_words[7],
			mb->un.mb_words[8], mb->un.mb_words[9],
			mb->un.mb_words[10], mb->un.mb_words[11],
			mb->un.mb_words[12], mboxq->mcqe.word0,
			mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
			mboxq->mcqe.trailer);
exit:
	/* We are holding the token, no need for a lock when releasing it */
	spin_lock_irqsave(&phba->hbalock, iflag);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = NULL;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rc;
}
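
/*
 * Worked example of the status merge above: if the MCQE carries status
 * 0x3 while the MQE still reads MBX_SUCCESS, the mailbox status becomes
 * (LPFC_MBX_ERROR_RANGE | 0x3) -- i.e. 0x4003, assuming the
 * conventional LPFC_MBX_ERROR_RANGE value of 0x4000 that the "x4000"
 * comment refers to.
 */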

/**
 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox needs to be processed.
 *
 * This function is called by discovery code and HBA management code to submit
 * a mailbox command to firmware with SLI-4 interface spec.
 *
 * The caller owns the mailbox command after the return of the function.
 **/
static int
lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
		       uint32_t flag)
{
	struct lpfc_sli *psli = &phba->sli;
	unsigned long iflags;
	int rc;

	/* dump from issue mailbox command if setup */
	lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);

	rc = lpfc_mbox_dev_check(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2544 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		goto out_not_finished;
	}

	/* Detect polling mode and jump to a handler */
	if (!phba->sli4_hba.intr_enable) {
		if (flag == MBX_POLL)
			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
		else
			rc = -EIO;
		if (rc != MBX_SUCCESS)
			lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
					"(%d):2541 Mailbox command x%x "
					"(x%x/x%x) failure: "
					"mqe_sta: x%x mcqe_sta: x%x/x%x "
					"Data: x%x x%x\n",
					mboxq->vport ? mboxq->vport->vpi : 0,
					mboxq->u.mb.mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									mboxq),
					lpfc_sli_config_mbox_opcode_get(phba,
									mboxq),
					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
					bf_get(lpfc_mcqe_ext_status,
					       &mboxq->mcqe),
					psli->sli_flag, flag);
		return rc;
	} else if (flag == MBX_POLL) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"(%d):2542 Try to issue mailbox command "
				"x%x (x%x/x%x) synchronously ahead of async "
				"mailbox command queue: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		/* Try to block the asynchronous mailbox posting */
		rc = lpfc_sli4_async_mbox_block(phba);
		if (!rc) {
			/* Successfully blocked, now issue sync mbox cmd */
			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
			if (rc != MBX_SUCCESS)
				lpfc_printf_log(phba, KERN_WARNING,
					LOG_MBOX | LOG_SLI,
					"(%d):2597 Sync Mailbox command "
					"x%x (x%x/x%x) failure: "
					"mqe_sta: x%x mcqe_sta: x%x/x%x "
					"Data: x%x x%x\n",
					mboxq->vport ? mboxq->vport->vpi : 0,
					mboxq->u.mb.mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									mboxq),
					lpfc_sli_config_mbox_opcode_get(phba,
									mboxq),
					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
					bf_get(lpfc_mcqe_ext_status,
					       &mboxq->mcqe),
					psli->sli_flag, flag);
			/* Unblock the async mailbox posting afterward */
			lpfc_sli4_async_mbox_unblock(phba);
		}
		return rc;
	}

	/* Now, interrupt mode asynchronous mailbox command */
	rc = lpfc_mbox_cmd_check(phba, mboxq);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2543 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		goto out_not_finished;
	}

	/* Put the mailbox command to the driver internal FIFO */
	psli->slistat.mbox_busy++;
	spin_lock_irqsave(&phba->hbalock, iflags);
	lpfc_mbox_put(phba, mboxq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0354 Mbox cmd issue - Enqueue Data: "
			"x%x (x%x/x%x) x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0xffffff,
			bf_get(lpfc_mqe_command, &mboxq->u.mqe),
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			phba->pport->port_state,
			psli->sli_flag, MBX_NOWAIT);
	/* Wake up worker thread to transport mailbox command from head */
	lpfc_worker_wake_up(phba);

	return MBX_BUSY;

out_not_finished:
	return MBX_NOT_FINISHED;
}

/**
 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
 * @phba: Pointer to HBA context object.
 *
 * This function is called by worker thread to send a mailbox command to
 * SLI4 HBA firmware.
 *
 **/
int
lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *mboxq;
	int rc = MBX_SUCCESS;
	unsigned long iflags;
	struct lpfc_mqe *mqe;
	uint32_t mbx_cmnd;

	/* Check interrupt mode before post async mailbox command */
	if (unlikely(!phba->sli4_hba.intr_enable))
		return MBX_NOT_FINISHED;

	/* Check for mailbox command service token */
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_NOT_FINISHED;
	}
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_NOT_FINISHED;
	}
	if (unlikely(phba->sli.mbox_active)) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0384 There is pending active mailbox cmd\n");
		return MBX_NOT_FINISHED;
	}
	/* Take the mailbox command service token */
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* Get the next mailbox command from head of queue */
	mboxq = lpfc_mbox_get(phba);

	/* If no more mailbox command waiting for post, we're done */
	if (!mboxq) {
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_SUCCESS;
	}
	phba->sli.mbox_active = mboxq;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* Check device readiness for posting mailbox command */
	rc = lpfc_mbox_dev_check(phba);
	if (unlikely(rc))
		/* Driver clean routine will clean up pending mailbox */
		goto out_not_finished;

	/* Prepare the mbox command to be posted */
	mqe = &mboxq->u.mqe;
	mbx_cmnd = bf_get(lpfc_mqe_command, mqe);

	/* Start timer for the mbox_tmo and log some mailbox post messages */
	mod_timer(&psli->mbox_tmo, (jiffies +
		  (HZ * lpfc_mbox_tmo_val(phba, mboxq))));

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
			"x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			phba->pport->port_state, psli->sli_flag);

	if (mbx_cmnd != MBX_HEARTBEAT) {
		if (mboxq->vport) {
			lpfc_debugfs_disc_trc(mboxq->vport,
				LPFC_DISC_TRC_MBOX_VPORT,
				"MBOX Send vport: cmd:x%x mb:x%x x%x",
				mbx_cmnd, mqe->un.mb_words[0],
				mqe->un.mb_words[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
				LPFC_DISC_TRC_MBOX,
				"MBOX Send: cmd:x%x mb:x%x x%x",
				mbx_cmnd, mqe->un.mb_words[0],
				mqe->un.mb_words[1]);
		}
	}
	psli->slistat.mbox_cmd++;

	/* Post the mailbox command to the port */
	rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2533 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, MBX_NOWAIT);
		goto out_not_finished;
	}

	return rc;

out_not_finished:
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->sli.mbox_active) {
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		/* Release the token */
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return MBX_NOT_FINISHED;
}

/**
 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox needs to be processed.
 *
 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine through
 * the API jump table function pointer from the lpfc_hba struct.
 *
 * Return codes: the caller owns the mailbox command after the return of the
 * function.
 **/
int
lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
{
	return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
}
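
/*
 * Illustrative polling-mode caller sketch, modeled on the FCFI
 * registration in lpfc_sli4_hba_setup() earlier in this file.  In
 * MBX_POLL mode the command has completed (or failed) by the time the
 * call returns, so the caller frees the mailbox itself:
 *
 *	lpfc_reg_fcfi(phba, mboxq);
 *	mboxq->vport = phba->pport;
 *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 *	if (rc == MBX_SUCCESS)
 *		phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
 *					&mboxq->u.mqe.un.reg_fcfi);
 *	mempool_free(mboxq, phba->mbox_mem_pool);
 */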

/**
 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the mbox interface API function jump table in @phba
 * struct.
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{

	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s3;
		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s4;
		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1420 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	return 0;
}
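
/*
 * Illustrative sketch: the jump table is filled in once at probe time
 * and every generic wrapper then dispatches through it, e.g.
 * lpfc_sli_issue_mbox() above.  (The probe-time call site lives in
 * lpfc_init.c and is an assumption here.)
 *
 *	if (lpfc_mbox_api_table_setup(phba, LPFC_PCI_DEV_OC))
 *		return -ENODEV;
 *	...
 *	rc = phba->lpfc_sli_issue_mbox(phba, pmbox, MBX_NOWAIT);
 */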

/**
 * __lpfc_sli_ringtx_put - Add an iocb to the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to address of newly added command iocb.
 *
 * This function is called with hbalock held to add a command
 * iocb to the txq when the SLI layer cannot submit the command iocb
 * to the ring.
 **/
void
__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		      struct lpfc_iocbq *piocb)
{
	/* Insert the caller's iocb in the txq tail for later processing. */
	list_add_tail(&piocb->list, &pring->txq);
	pring->txq_cnt++;
}

/**
 * lpfc_sli_next_iocb - Get the next iocb in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to address of newly added command iocb.
 *
 * This function is called with hbalock held before a new
 * iocb is submitted to the firmware. It drains the txq so that
 * iocbs queued there are flushed to the firmware before any new
 * iocb is submitted.
 * If there are iocbs in the txq which need to be submitted
 * to firmware, lpfc_sli_next_iocb returns the first element
 * of the txq after dequeuing it from the txq.
 * If there is no iocb in the txq then the function will return
 * *piocb and set *piocb to NULL. The caller needs to check
 * *piocb to find out whether there are more commands in the txq.
 **/
static struct lpfc_iocbq *
lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		   struct lpfc_iocbq **piocb)
{
	struct lpfc_iocbq *nextiocb;

	nextiocb = lpfc_sli_ringtx_get(phba, pring);
	if (!nextiocb) {
		nextiocb = *piocb;
		*piocb = NULL;
	}

	return nextiocb;
}

/**
 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
 * recovering from error state, if the HBA is resetting or if the
 * LPFC_STOP_IOCB_EVENT flag is turned on, the function returns IOCB_ERROR.
 * When the link is down, this function allows only iocbs for posting buffers.
 * This function finds the next available slot in the command ring, posts the
 * command to that slot and writes the port attention register to request that
 * the HBA start processing the new iocb. If there is no slot available in the
 * ring and flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq,
 * otherwise the function returns IOCB_BUSY.
 *
 * This function is called with hbalock held. The function will return success
 * after it successfully submits the iocb to firmware or after adding it to
 * the txq.
 **/
static int
__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
			 struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_iocbq *nextiocb;
	IOCB_t *iocb;
	struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];

	if (piocb->iocb_cmpl && (!piocb->vport) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		lpfc_printf_log(phba, KERN_ERR,
				LOG_SLI | LOG_VPORT,
				"1807 IOCB x%x failed. No vport\n",
				piocb->iocb.ulpCommand);
		dump_stack();
		return IOCB_ERROR;
	}


	/* If the PCI channel is in offline state, do not post iocbs. */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return IOCB_ERROR;

	/* If HBA has a deferred error attention, fail the iocb. */
	if (unlikely(phba->hba_flag & DEFER_ERATT))
		return IOCB_ERROR;

	/*
	 * We should never get an IOCB if we are in a < LINK_DOWN state
	 */
	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
		return IOCB_ERROR;

	/*
	 * Check to see if we are blocking IOCB processing because of an
	 * outstanding event.
	 */
	if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
		goto iocb_busy;

	if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
		/*
		 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
		 * can be issued if the link is not up.
		 */
		switch (piocb->iocb.ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
		case CMD_GEN_REQUEST64_CX:
			if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
			    (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
					FC_RCTL_DD_UNSOL_CMD) ||
			    (piocb->iocb.un.genreq64.w5.hcsw.Type !=
					MENLO_TRANSPORT_TYPE))

				goto iocb_busy;
			break;
		case CMD_QUE_RING_BUF_CN:
		case CMD_QUE_RING_BUF64_CN:
			/*
			 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
			 * completion, iocb_cmpl MUST be 0.
			 */
			if (piocb->iocb_cmpl)
				piocb->iocb_cmpl = NULL;
			/*FALLTHROUGH*/
		case CMD_CREATE_XRI_CR:
		case CMD_CLOSE_XRI_CN:
		case CMD_CLOSE_XRI_CX:
			break;
		default:
			goto iocb_busy;
		}

	/*
	 * For FCP commands, we must be in a state where we can process link
	 * attention events.
	 */
	} else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
			    !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
		goto iocb_busy;
	}

	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

	if (iocb)
		lpfc_sli_update_ring(phba, pring);
	else
		lpfc_sli_update_full_ring(phba, pring);

	if (!piocb)
		return IOCB_SUCCESS;

	goto out_busy;

 iocb_busy:
	pring->stats.iocb_cmd_delay++;

 out_busy:

	if (!(flag & SLI_IOCB_RET_IOCB)) {
		__lpfc_sli_ringtx_put(phba, pring, piocb);
		return IOCB_SUCCESS;
	}

	return IOCB_BUSY;
}
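
/*
 * Illustrative caller sketch: a submitter that prefers getting
 * IOCB_BUSY back over having the iocb parked on the txq passes
 * SLI_IOCB_RET_IOCB.  Per the header comment, hbalock must already be
 * held; the retry helper named below is hypothetical.
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	rc = __lpfc_sli_issue_iocb_s3(phba, phba->sli.fcp_ring, piocb,
 *				      SLI_IOCB_RET_IOCB);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	if (rc == IOCB_BUSY)
 *		lpfc_example_requeue_later(phba, piocb);  // hypothetical
 */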

/**
 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to command iocb.
 * @sglq: Pointer to the scatter gather queue object.
 *
 * This routine converts the bpl or bde that is in the IOCB
 * to a sgl list for the sli4 hardware. The physical address
 * of the bpl/bde is converted back to a virtual address.
 * If the IOCB contains a BPL then the list of BDE's is
 * converted to sli4_sge's. If the IOCB contains a single
 * BDE then it is converted to a single sli_sge.
 * The IOCB is still in cpu endianness so the contents of
 * the bpl can be used without byte swapping.
 *
 * Returns valid XRI = Success, NO_XRI = Failure.
 **/
static uint16_t
lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
		  struct lpfc_sglq *sglq)
{
	uint16_t xritag = NO_XRI;
	struct ulp_bde64 *bpl = NULL;
	struct ulp_bde64 bde;
	struct sli4_sge *sgl = NULL;
	struct lpfc_dmabuf *dmabuf;
	IOCB_t *icmd;
	int numBdes = 0;
	int i = 0;
	uint32_t offset = 0; /* accumulated offset in the sg request list */
	int inbound = 0; /* number of sg reply entries inbound from firmware */

	if (!piocbq || !sglq)
		return xritag;

	sgl = (struct sli4_sge *)sglq->sgl;
	icmd = &piocbq->iocb;
	if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
		return sglq->sli4_xritag;
	if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
		numBdes = icmd->un.genreq64.bdl.bdeSize /
				sizeof(struct ulp_bde64);
		/* The addrHigh and addrLow fields within the IOCB
		 * have not been byteswapped yet so there is no
		 * need to swap them back.
		 */
		if (piocbq->context3)
			dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
		else
			return xritag;

		bpl = (struct ulp_bde64 *)dmabuf->virt;
		if (!bpl)
			return xritag;

		for (i = 0; i < numBdes; i++) {
			/* Should already be byte swapped. */
			sgl->addr_hi = bpl->addrHigh;
			sgl->addr_lo = bpl->addrLow;

			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((i+1) == numBdes)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			/* swap the size field back to the cpu so we
			 * can assign it to the sgl.
			 */
			bde.tus.w = le32_to_cpu(bpl->tus.w);
			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
			/* The offsets in the sgl need to be accumulated
			 * separately for the request and reply lists.
			 * The request is always first, the reply follows.
			 */
			if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
				/* add up the reply sg entries */
				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
					inbound++;
				/* first inbound? reset the offset */
				if (inbound == 1)
					offset = 0;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				bf_set(lpfc_sli4_sge_type, sgl,
					LPFC_SGE_TYPE_DATA);
				offset += bde.tus.f.bdeSize;
			}
			sgl->word2 = cpu_to_le32(sgl->word2);
			bpl++;
			sgl++;
		}
	} else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
		/* The addrHigh and addrLow fields of the BDE have not
		 * been byteswapped yet so they need to be swapped
		 * before putting them in the sgl.
		 */
		sgl->addr_hi =
			cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
		sgl->addr_lo =
			cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len =
			cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
	}
	return sglq->sli4_xritag;
}
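
/*
 * Worked example of the conversion above: a BPL carrying three BDEs
 * produces three SGEs whose lpfc_sli4_sge_last bits are 0, 0, 1; for a
 * CMD_GEN_REQUEST64_CR the first inbound (reply) BDE resets the
 * accumulated sge offset to 0, so request and reply lists are offset
 * independently.
 */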

/**
 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
 * @phba: Pointer to HBA context object.
 *
 * This routine performs a round-robin SCSI command to SLI4 FCP WQ index
 * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
 * held.
 *
 * Return: index into SLI4 fast-path FCP queue index.
 **/
static inline uint32_t
lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
{
	int i;

	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU)
		i = smp_processor_id();
	else
		i = atomic_add_return(1, &phba->fcp_qidx);

	i = (i % phba->cfg_fcp_io_channel);
	return i;
}
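
/*
 * Worked example: with cfg_fcp_io_channel = 4, successive counter
 * values 5, 6, 7, 8 select WQ indexes 1, 2, 3, 0; in
 * LPFC_FCP_SCHED_BY_CPU mode the same config maps CPU 6 to WQ 2.
 */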
7916
e59058c4 7917/**
4f774513 7918 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
e59058c4 7919 * @phba: Pointer to HBA context object.
4f774513
JS
7920 * @piocb: Pointer to command iocb.
7921 * @wqe: Pointer to the work queue entry.
e59058c4 7922 *
4f774513
JS
7923 * This routine converts the iocb command to its Work Queue Entry
7924 * equivalent. The wqe pointer should not have any fields set when
7925 * this routine is called because it will memcpy over them.
7926 * This routine does not set the CQ_ID or the WQEC bits in the
7927 * wqe.
e59058c4 7928 *
4f774513 7929 * Returns: 0 = Success, IOCB_ERROR = Failure.
e59058c4 7930 **/
cf5bf97e 7931static int
4f774513
JS
7932lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7933 union lpfc_wqe *wqe)
cf5bf97e 7934{
5ffc266e 7935 uint32_t xmit_len = 0, total_len = 0;
4f774513
JS
7936 uint8_t ct = 0;
7937 uint32_t fip;
7938 uint32_t abort_tag;
7939 uint8_t command_type = ELS_COMMAND_NON_FIP;
7940 uint8_t cmnd;
7941 uint16_t xritag;
dcf2a4e0
JS
7942 uint16_t abrt_iotag;
7943 struct lpfc_iocbq *abrtiocbq;
4f774513 7944 struct ulp_bde64 *bpl = NULL;
f0d9bccc 7945 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
5ffc266e
JS
7946 int numBdes, i;
7947 struct ulp_bde64 bde;
c31098ce 7948 struct lpfc_nodelist *ndlp;
ff78d8f9 7949 uint32_t *pcmd;
1b51197d 7950 uint32_t if_type;
4f774513 7951
45ed1190 7952 fip = phba->hba_flag & HBA_FIP_SUPPORT;
4f774513 7953 /* The fcp commands will set command type */
0c287589 7954 if (iocbq->iocb_flag & LPFC_IO_FCP)
4f774513 7955 command_type = FCP_COMMAND;
c868595d 7956 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
0c287589
JS
7957 command_type = ELS_COMMAND_FIP;
7958 else
7959 command_type = ELS_COMMAND_NON_FIP;
7960
4f774513
JS
7961 /* Some of the fields are in the right position already */
7962 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
7963 abort_tag = (uint32_t) iocbq->iotag;
7964 xritag = iocbq->sli4_xritag;
f0d9bccc 7965 wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
4f774513
JS
7966 /* words0-2 bpl convert bde */
7967 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
5ffc266e
JS
7968 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
7969 sizeof(struct ulp_bde64);
4f774513
JS
7970 bpl = (struct ulp_bde64 *)
7971 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
7972 if (!bpl)
7973 return IOCB_ERROR;
cf5bf97e 7974
4f774513
JS
7975 /* Should already be byte swapped. */
7976 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
7977 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
7978 /* swap the size field back to the cpu so we
7979 * can assign it to the sgl.
7980 */
7981 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
5ffc266e
JS
7982 xmit_len = wqe->generic.bde.tus.f.bdeSize;
7983 total_len = 0;
7984 for (i = 0; i < numBdes; i++) {
7985 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
7986 total_len += bde.tus.f.bdeSize;
7987 }
4f774513 7988 } else
5ffc266e 7989 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
cf5bf97e 7990
4f774513
JS
7991 iocbq->iocb.ulpIoTag = iocbq->iotag;
7992 cmnd = iocbq->iocb.ulpCommand;
a4bc3379 7993
4f774513
JS
7994 switch (iocbq->iocb.ulpCommand) {
7995 case CMD_ELS_REQUEST64_CR:
93d1379e
JS
7996 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
7997 ndlp = iocbq->context_un.ndlp;
7998 else
7999 ndlp = (struct lpfc_nodelist *)iocbq->context1;
4f774513
JS
8000 if (!iocbq->iocb.ulpLe) {
8001 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8002 "2007 Only Limited Edition cmd Format"
8003 " supported 0x%x\n",
8004 iocbq->iocb.ulpCommand);
8005 return IOCB_ERROR;
8006 }
ff78d8f9 8007
5ffc266e 8008 wqe->els_req.payload_len = xmit_len;
4f774513
JS
8009 /* Els_request64 has a TMO */
8010 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
8011 iocbq->iocb.ulpTimeout);
8012 /* Need a VF for word 4 set the vf bit*/
8013 bf_set(els_req64_vf, &wqe->els_req, 0);
8014 /* And a VFID for word 12 */
8015 bf_set(els_req64_vfid, &wqe->els_req, 0);
4f774513 8016 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
f0d9bccc
JS
8017 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8018 iocbq->iocb.ulpContext);
8019 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
8020 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
4f774513 8021 /* CCP CCPE PV PRI in word10 were set in the memcpy */
ff78d8f9 8022 if (command_type == ELS_COMMAND_FIP)
c868595d
JS
8023 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
8024 >> LPFC_FIP_ELS_ID_SHIFT);
ff78d8f9
JS
8025 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
8026 iocbq->context2)->virt);
1b51197d
JS
8027 if_type = bf_get(lpfc_sli_intf_if_type,
8028 &phba->sli4_hba.sli_intf);
8029 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
ff78d8f9 8030 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
cb69f7de 8031 *pcmd == ELS_CMD_SCR ||
6b5151fd 8032 *pcmd == ELS_CMD_FDISC ||
bdcd2b92 8033 *pcmd == ELS_CMD_LOGO ||
ff78d8f9
JS
8034 *pcmd == ELS_CMD_PLOGI)) {
8035 bf_set(els_req64_sp, &wqe->els_req, 1);
8036 bf_set(els_req64_sid, &wqe->els_req,
8037 iocbq->vport->fc_myDID);
939723a4
JS
8038 if ((*pcmd == ELS_CMD_FLOGI) &&
8039 !(phba->fc_topology ==
8040 LPFC_TOPOLOGY_LOOP))
8041 bf_set(els_req64_sid, &wqe->els_req, 0);
ff78d8f9
JS
8042 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
8043 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
a7dd9c0f 8044 phba->vpi_ids[iocbq->vport->vpi]);
3ef6d24c 8045 } else if (pcmd && iocbq->context1) {
ff78d8f9
JS
8046 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
8047 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8048 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
8049 }
c868595d 8050 }
6d368e53
JS
8051 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
8052 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
f0d9bccc
JS
8053 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
8054 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
8055 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
8056 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
8057 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
8058 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
7851fe2c 8059 break;
5ffc266e 8060 case CMD_XMIT_SEQUENCE64_CX:
f0d9bccc
JS
8061 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
8062 iocbq->iocb.un.ulpWord[3]);
8063 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
7851fe2c 8064 iocbq->iocb.unsli3.rcvsli3.ox_id);
5ffc266e
JS
8065 /* The entire sequence is transmitted for this IOCB */
8066 xmit_len = total_len;
8067 cmnd = CMD_XMIT_SEQUENCE64_CR;
1b51197d
JS
8068 if (phba->link_flag & LS_LOOPBACK_MODE)
8069 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
4f774513 8070 case CMD_XMIT_SEQUENCE64_CR:
f0d9bccc
JS
8071 /* word3 iocb=io_tag32 wqe=reserved */
8072 wqe->xmit_sequence.rsvd3 = 0;
4f774513
JS
8073 /* word4 relative_offset memcpy */
8074 /* word5 r_ctl/df_ctl memcpy */
f0d9bccc
JS
8075 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
8076 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
8077 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
8078 LPFC_WQE_IOD_WRITE);
8079 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
8080 LPFC_WQE_LENLOC_WORD12);
8081 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
5ffc266e
JS
8082 wqe->xmit_sequence.xmit_len = xmit_len;
8083 command_type = OTHER_COMMAND;
7851fe2c 8084 break;
4f774513 8085 case CMD_XMIT_BCAST64_CN:
f0d9bccc
JS
8086 /* word3 iocb=iotag32 wqe=seq_payload_len */
8087 wqe->xmit_bcast64.seq_payload_len = xmit_len;
4f774513
JS
8088 /* word4 iocb=rsvd wqe=rsvd */
8089 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
8090 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
f0d9bccc 8091 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
4f774513 8092 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
f0d9bccc
JS
8093 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
8094 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
8095 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
8096 LPFC_WQE_LENLOC_WORD3);
8097 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
7851fe2c 8098 break;
4f774513
JS
8099 case CMD_FCP_IWRITE64_CR:
8100 command_type = FCP_COMMAND_DATA_OUT;
f0d9bccc
JS
8101 /* word3 iocb=iotag wqe=payload_offset_len */
8102 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
8103 wqe->fcp_iwrite.payload_offset_len =
8104 xmit_len + sizeof(struct fcp_rsp);
8105 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
8106 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
8107 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
8108 iocbq->iocb.ulpFCP2Rcvy);
8109 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
8110 /* Always open the exchange */
8111 bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0);
f0d9bccc
JS
8112 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
8113 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
8114 LPFC_WQE_LENLOC_WORD4);
8115 bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
8116 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
acd6859b 8117 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
7851fe2c 8118 break;
4f774513 8119 case CMD_FCP_IREAD64_CR:
f0d9bccc
JS
8120 /* word3 iocb=iotag wqe=payload_offset_len */
8121 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
8122 wqe->fcp_iread.payload_offset_len =
5ffc266e 8123 xmit_len + sizeof(struct fcp_rsp);
f0d9bccc
JS
8124 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
8125 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
8126 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
8127 iocbq->iocb.ulpFCP2Rcvy);
8128 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
f1126688
JS
8129 /* Always open the exchange */
8130 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
f0d9bccc
JS
8131 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
8132 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
8133 LPFC_WQE_LENLOC_WORD4);
8134 bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
8135 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
acd6859b 8136 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
7851fe2c 8137 break;
4f774513 8138 case CMD_FCP_ICMND64_CR:
f0d9bccc
JS
8139 /* word3 iocb=IO_TAG wqe=reserved */
8140 wqe->fcp_icmd.rsrvd3 = 0;
8141 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
4f774513 8142 /* Always open the exchange */
f0d9bccc
JS
8143 bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0);
8144 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
8145 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
8146 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
8147 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
8148 LPFC_WQE_LENLOC_NONE);
8149 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
2a94aea4
JS
8150 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
8151 iocbq->iocb.ulpFCP2Rcvy);
7851fe2c 8152 break;
4f774513 8153 case CMD_GEN_REQUEST64_CR:
63e801ce
JS
8154 /* For this command calculate the xmit length of the
8155 * request bde.
8156 */
8157 xmit_len = 0;
8158 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
8159 sizeof(struct ulp_bde64);
8160 for (i = 0; i < numBdes; i++) {
63e801ce 8161 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
546fc854
JS
8162 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
8163 break;
63e801ce
JS
8164 xmit_len += bde.tus.f.bdeSize;
8165 }
f0d9bccc
JS
8166 /* word3 iocb=IO_TAG wqe=request_payload_len */
8167 wqe->gen_req.request_payload_len = xmit_len;
8168 /* word4 iocb=parameter wqe=relative_offset memcpy */
8169 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
4f774513
JS
8170 /* word6 context tag copied in memcpy */
8171 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
8172 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
8173 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8174 "2015 Invalid CT %x command 0x%x\n",
8175 ct, iocbq->iocb.ulpCommand);
8176 return IOCB_ERROR;
8177 }
f0d9bccc
JS
8178 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
8179 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
8180 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
8181 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
8182 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
8183 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
8184 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
8185 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
4f774513 8186 command_type = OTHER_COMMAND;
7851fe2c 8187 break;
4f774513 8188 case CMD_XMIT_ELS_RSP64_CX:
c31098ce 8189 ndlp = (struct lpfc_nodelist *)iocbq->context1;
4f774513 8190 /* words0-2 BDE memcpy */
f0d9bccc
JS
8191 /* word3 iocb=iotag32 wqe=response_payload_len */
8192 wqe->xmit_els_rsp.response_payload_len = xmit_len;
939723a4
JS
8193 /* word4 */
8194 wqe->xmit_els_rsp.word4 = 0;
4f774513
JS
8195 /* word5 iocb=rsvd wqe=did */
8196 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
939723a4
JS
8197 iocbq->iocb.un.xseq64.xmit_els_remoteID);
8198
8199 if_type = bf_get(lpfc_sli_intf_if_type,
8200 &phba->sli4_hba.sli_intf);
8201 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
8202 if (iocbq->vport->fc_flag & FC_PT2PT) {
8203 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
8204 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
8205 iocbq->vport->fc_myDID);
8206 if (iocbq->vport->fc_myDID == Fabric_DID) {
8207 bf_set(wqe_els_did,
8208 &wqe->xmit_els_rsp.wqe_dest, 0);
8209 }
8210 }
8211 }
f0d9bccc
JS
8212 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
8213 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
8214 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
8215 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
7851fe2c 8216 iocbq->iocb.unsli3.rcvsli3.ox_id);
4f774513 8217 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
f0d9bccc 8218 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
6d368e53 8219 phba->vpi_ids[iocbq->vport->vpi]);
f0d9bccc
JS
8220 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
8221 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
8222 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
8223 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
8224 LPFC_WQE_LENLOC_WORD3);
8225 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
6d368e53
JS
8226 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
8227 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
ff78d8f9
JS
8228 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
8229 iocbq->context2)->virt);
8230 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
939723a4
JS
8231 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
8232 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
ff78d8f9 8233 iocbq->vport->fc_myDID);
939723a4
JS
8234 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
8235 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
ff78d8f9
JS
8236 phba->vpi_ids[phba->pport->vpi]);
8237 }
4f774513 8238 command_type = OTHER_COMMAND;
7851fe2c 8239 break;
4f774513
JS
8240 case CMD_CLOSE_XRI_CN:
8241 case CMD_ABORT_XRI_CN:
8242 case CMD_ABORT_XRI_CX:
8243 /* words 0-2 memcpy should be 0 (reserved) */
8244 /* port will send abts */
dcf2a4e0
JS
8245 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
8246 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
8247 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
8248 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
8249 } else
8250 fip = 0;
8251
8252 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
4f774513 8253 /*
dcf2a4e0
JS
8254 * The link is down, or the command was ELS_FIP,
8255 * so the fw does not need to send abts
4f774513
JS
8256 * on the wire.
8257 */
8258 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
8259 else
8260 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
8261 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
f0d9bccc
JS
8262 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
8263 wqe->abort_cmd.rsrvd5 = 0;
8264 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
4f774513
JS
8265 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
8266 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
4f774513
JS
8267 /*
8268 * The abort handler will send us CMD_ABORT_XRI_CN or
8269 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
8270 */
f0d9bccc
JS
8271 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
8272 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
8273 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
8274 LPFC_WQE_LENLOC_NONE);
4f774513
JS
8275 cmnd = CMD_ABORT_XRI_CX;
8276 command_type = OTHER_COMMAND;
8277 xritag = 0;
7851fe2c 8278 break;
6669f9bb 8279 case CMD_XMIT_BLS_RSP64_CX:
6b5151fd 8280 ndlp = (struct lpfc_nodelist *)iocbq->context1;
546fc854 8281 /* As BLS ABTS RSP WQE is very different from other WQEs,
6669f9bb
JS
8282 * we re-construct this WQE here from scratch based on
8283 * information in the iocbq.
8284 */
8285 memset(wqe, 0, sizeof(union lpfc_wqe));
5ffc266e 8286 /* OX_ID is invariant regardless of who sent ABTS to CT exchange */
6669f9bb 8287 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
546fc854
JS
8288 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
8289 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
5ffc266e
JS
8290 LPFC_ABTS_UNSOL_INT) {
8291 /* ABTS sent by initiator to CT exchange, the
8292 * RX_ID field will be filled with the newly
8293 * allocated responder XRI.
8294 */
8295 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
8296 iocbq->sli4_xritag);
8297 } else {
8298 /* ABTS sent by responder to CT exchange, the
8299 * RX_ID field will be filled with the responder
8300 * RX_ID from ABTS.
8301 */
8302 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
546fc854 8303 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
5ffc266e 8304 }
6669f9bb
JS
8305 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
8306 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
6b5151fd
JS
8307
8308 /* Use CT=VPI */
8309 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
8310 ndlp->nlp_DID);
8311 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
8312 iocbq->iocb.ulpContext);
8313 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
6669f9bb 8314 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
6b5151fd 8315 phba->vpi_ids[phba->pport->vpi]);
f0d9bccc
JS
8316 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
8317 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
8318 LPFC_WQE_LENLOC_NONE);
6669f9bb
JS
8319 /* Overwrite the pre-set comnd type with OTHER_COMMAND */
8320 command_type = OTHER_COMMAND;
546fc854
JS
8321 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
8322 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
8323 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
8324 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
8325 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
8326 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
8327 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
8328 }
8329
7851fe2c 8330 break;
4f774513
JS
8331 case CMD_XRI_ABORTED_CX:
8332 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
4f774513
JS
8333 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
8334 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
8335 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
8336 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
8337 default:
8338 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8339 "2014 Invalid command 0x%x\n",
8340 iocbq->iocb.ulpCommand);
8341 return IOCB_ERROR;
7851fe2c 8342 break;
4f774513 8343 }
6d368e53 8344
8012cc38
JS
8345 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
8346 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
8347 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
8348 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
8349 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
8350 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
8351 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
8352 LPFC_IO_DIF_INSERT);
f0d9bccc
JS
8353 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
8354 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
8355 wqe->generic.wqe_com.abort_tag = abort_tag;
8356 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
8357 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
8358 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
8359 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
4f774513
JS
8360 return 0;
8361}
8362
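/*
 * Minimal sketch (a simplification, not the real bf_set macro from
 * lpfc_hw4.h): the read-modify-write bitfield pattern that dominates
 * the WQE construction above. The field's bits are cleared with its
 * mask, then the new value is OR-ed in at the field's shift.
 */
#if 0	/* example only */
static inline void model_bf_set(uint32_t *word, unsigned int shift,
				uint32_t mask, uint32_t value)
{
	*word &= ~(mask << shift);		/* clear the field */
	*word |= (value & mask) << shift;	/* set the new value */
}
#endif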
8363/**
8364 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
8365 * @phba: Pointer to HBA context object.
8366 * @ring_number: SLI ring number to issue iocb on.
8367 * @piocb: Pointer to command iocb.
8368 * @flag: Flag indicating if this command can be put into txq.
8369 *
8370 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
8371 * an iocb command to an HBA with SLI-4 interface spec.
8372 *
8373 * This function is called with hbalock held. The function will return success
8374 * after it successfully submits the iocb to the firmware or after adding it to the
8375 * txq.
8376 **/
8377static int
8378__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
8379 struct lpfc_iocbq *piocb, uint32_t flag)
8380{
8381 struct lpfc_sglq *sglq;
4f774513
JS
8382 union lpfc_wqe wqe;
8383 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
4f774513
JS
8384
8385 if (piocb->sli4_xritag == NO_XRI) {
8386 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
6b5151fd 8387 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
4f774513
JS
8388 sglq = NULL;
8389 else {
2a9bf3d0
JS
8390 if (pring->txq_cnt) {
8391 if (!(flag & SLI_IOCB_RET_IOCB)) {
8392 __lpfc_sli_ringtx_put(phba,
8393 pring, piocb);
8394 return IOCB_SUCCESS;
8395 } else {
8396 return IOCB_BUSY;
8397 }
8398 } else {
6d368e53 8399 sglq = __lpfc_sli_get_sglq(phba, piocb);
2a9bf3d0
JS
8400 if (!sglq) {
8401 if (!(flag & SLI_IOCB_RET_IOCB)) {
8402 __lpfc_sli_ringtx_put(phba,
8403 pring,
8404 piocb);
8405 return IOCB_SUCCESS;
8406 } else
8407 return IOCB_BUSY;
8408 }
8409 }
4f774513
JS
8410 }
8411 } else if (piocb->iocb_flag & LPFC_IO_FCP) {
6d368e53
JS
8412 /* These IOs already have an XRI and a mapped sgl. */
8413 sglq = NULL;
4f774513 8414 } else {
6d368e53
JS
8415 /*
8416 * This is a continuation of a command (CX), so this
4f774513
JS
8417 * sglq is on the active list
8418 */
edccdc17 8419 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
4f774513
JS
8420 if (!sglq)
8421 return IOCB_ERROR;
8422 }
8423
8424 if (sglq) {
6d368e53 8425 piocb->sli4_lxritag = sglq->sli4_lxritag;
2a9bf3d0 8426 piocb->sli4_xritag = sglq->sli4_xritag;
2a9bf3d0 8427 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
4f774513
JS
8428 return IOCB_ERROR;
8429 }
8430
8431 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
8432 return IOCB_ERROR;
8433
341af102
JS
8434 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
8435 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
5ffc266e
JS
8436 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
8437 &wqe))
4f774513
JS
8438 return IOCB_ERROR;
8439 } else {
8440 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
8441 return IOCB_ERROR;
8442 }
8443 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
8444
8445 return 0;
8446}
8447
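/*
 * Hypothetical caller sketch (example_issue_or_defer is not a driver
 * function): how the SLI_IOCB_RET_IOCB flag changes the behavior of
 * the routine above. With the flag set, a busy port yields IOCB_BUSY
 * instead of the iocb being parked on the txq.
 */
#if 0	/* example only */
static int example_issue_or_defer(struct lpfc_hba *phba,
				  struct lpfc_iocbq *piocb)
{
	int rc;

	/* The caller is assumed to already hold the appropriate lock. */
	rc = __lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb,
				   SLI_IOCB_RET_IOCB);
	if (rc == IOCB_BUSY)
		return -EBUSY;	/* no room now; retry from a worker */
	return (rc == IOCB_SUCCESS) ? 0 : -EIO;
}
#endif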
8448/**
8449 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
8450 *
8451 * This routine wraps the actual lockless version of the issue IOCB
8452 * function pointer from the lpfc_hba struct.
8453 *
8454 * Return codes:
8455 * IOCB_ERROR - Error
8456 * IOCB_SUCCESS - Success
8457 * IOCB_BUSY - Busy
8458 **/
2a9bf3d0 8459int
4f774513
JS
8460__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
8461 struct lpfc_iocbq *piocb, uint32_t flag)
8462{
8463 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
8464}
8465
8466/**
25985edc 8467 * lpfc_sli_api_table_setup - Set up sli api function jump table
4f774513
JS
8468 * @phba: The hba struct for which this call is being executed.
8469 * @dev_grp: The HBA PCI-Device group number.
8470 *
8471 * This routine sets up the SLI interface API function jump table in @phba
8472 * struct.
8473 * Returns: 0 - success, -ENODEV - failure.
8474 **/
8475int
8476lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8477{
8478
8479 switch (dev_grp) {
8480 case LPFC_PCI_DEV_LP:
8481 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
8482 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
8483 break;
8484 case LPFC_PCI_DEV_OC:
8485 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
8486 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
8487 break;
8488 default:
8489 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8490 "1419 Invalid HBA PCI-device group: 0x%x\n",
8491 dev_grp);
8492 return -ENODEV;
8493 break;
8494 }
8495 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
8496 return 0;
8497}
8498
8499/**
8500 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
8501 * @phba: Pointer to HBA context object.
8502 * @pring: Pointer to driver SLI ring object.
8503 * @piocb: Pointer to command iocb.
8504 * @flag: Flag indicating if this command can be put into txq.
8505 *
8506 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb.
8507 * This function acquires the appropriate lock (the per-ring lock
8508 * for SLI-4, hbalock otherwise), calls __lpfc_sli_issue_iocb and
8509 * returns whatever __lpfc_sli_issue_iocb returns. This wrapper is
8510 * used by functions which do not hold the lock themselves.
8511 **/
8512int
8513lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
8514 struct lpfc_iocbq *piocb, uint32_t flag)
8515{
ba20c853 8516 struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
2a76a283 8517 struct lpfc_sli_ring *pring;
ba20c853
JS
8518 struct lpfc_queue *fpeq;
8519 struct lpfc_eqe *eqe;
4f774513 8520 unsigned long iflags;
2a76a283 8521 int rc, idx;
4f774513 8522
7e56aa25 8523 if (phba->sli_rev == LPFC_SLI_REV4) {
2a76a283
JS
8524 if (piocb->iocb_flag & LPFC_IO_FCP) {
8525 if (unlikely(!phba->sli4_hba.fcp_wq))
8526 return IOCB_ERROR;
8527 idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
8528 piocb->fcp_wqidx = idx;
8529 ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
ba20c853
JS
8530
8531 pring = &phba->sli.ring[ring_number];
8532 spin_lock_irqsave(&pring->ring_lock, iflags);
8533 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
8534 flag);
8535 spin_unlock_irqrestore(&pring->ring_lock, iflags);
8536
8537 if (lpfc_fcp_look_ahead) {
8538 fcp_eq_hdl = &phba->sli4_hba.fcp_eq_hdl[idx];
8539
8540 if (atomic_dec_and_test(&fcp_eq_hdl->
8541 fcp_eq_in_use)) {
4f774513 8542
ba20c853
JS
8543 /* Get associated EQ with this index */
8544 fpeq = phba->sli4_hba.hba_eq[idx];
8545
8546 /* Turn off interrupts from this EQ */
8547 lpfc_sli4_eq_clr_intr(fpeq);
8548
8549 /*
8550 * Process all the events on FCP EQ
8551 */
8552 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
8553 lpfc_sli4_hba_handle_eqe(phba,
8554 eqe, idx);
8555 fpeq->EQ_processed++;
8556 }
8557
8558 /* Always clear and re-arm the EQ */
8559 lpfc_sli4_eq_release(fpeq,
8560 LPFC_QUEUE_REARM);
8561 }
8562 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
8563 }
8564 } else {
8565 pring = &phba->sli.ring[ring_number];
8566 spin_lock_irqsave(&pring->ring_lock, iflags);
8567 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
8568 flag);
8569 spin_unlock_irqrestore(&pring->ring_lock, iflags);
8570
2a76a283 8571 }
7e56aa25
JS
8572 } else {
8573 /* For now, SLI2/3 will still use hbalock */
8574 spin_lock_irqsave(&phba->hbalock, iflags);
8575 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
8576 spin_unlock_irqrestore(&phba->hbalock, iflags);
8577 }
4f774513
JS
8578 return rc;
8579}
8580
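/*
 * Hypothetical usage sketch (example_issue_els is not a driver
 * function): callers that do not hold hbalock go through this wrapper.
 * FCP iocbs are redirected to a per-channel ring inside the wrapper;
 * everything else names its ring explicitly.
 */
#if 0	/* example only */
static void example_issue_els(struct lpfc_hba *phba,
			      struct lpfc_iocbq *elsiocb)
{
	/* No lock held here; the wrapper takes the right one. */
	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
	    IOCB_ERROR)
		lpfc_els_free_iocb(phba, elsiocb);
}
#endif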
8581/**
8582 * lpfc_extra_ring_setup - Extra ring setup function
8583 * @phba: Pointer to HBA context object.
8584 *
8585 * This function is called while the driver attaches to the
8586 * HBA to set up the extra ring. The extra ring is used
8587 * only when the driver needs to support target mode
8588 * functionality or IP over FC functionality.
8589 *
8590 * This function is called with no lock held.
8591 **/
8592static int
8593lpfc_extra_ring_setup( struct lpfc_hba *phba)
8594{
8595 struct lpfc_sli *psli;
8596 struct lpfc_sli_ring *pring;
8597
8598 psli = &phba->sli;
8599
8600 /* Adjust cmd/rsp ring iocb entries more evenly */
8601
8602 /* Take some away from the FCP ring */
8603 pring = &psli->ring[psli->fcp_ring];
7e56aa25
JS
8604 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
8605 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
8606 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
8607 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
cf5bf97e 8608
a4bc3379
JS
8609 /* and give them to the extra ring */
8610 pring = &psli->ring[psli->extra_ring];
8611
7e56aa25
JS
8612 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
8613 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
8614 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
8615 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
cf5bf97e
JW
8616
8617 /* Setup default profile for this ring */
8618 pring->iotag_max = 4096;
8619 pring->num_mask = 1;
8620 pring->prt[0].profile = 0; /* Mask 0 */
a4bc3379
JS
8621 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
8622 pring->prt[0].type = phba->cfg_multi_ring_type;
cf5bf97e
JW
8623 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
8624 return 0;
8625}
8626
cb69f7de
JS
8627/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
8628 * @phba: Pointer to HBA context object.
8629 * @iocbq: Pointer to iocb object.
8630 *
8631 * The async_event handler calls this routine when it receives
8632 * an ASYNC_STATUS_CN event from the port. The port generates
8633 * this event when an Abort Sequence request to an rport fails
8634 * twice in succession. The abort could have been originated by the
8635 * driver or by the port. The ABTS could have been for an ELS
8636 * or FCP IO. The port only generates this event when an ABTS
8637 * fails to complete after one retry.
8638 */
8639static void
8640lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
8641 struct lpfc_iocbq *iocbq)
8642{
8643 struct lpfc_nodelist *ndlp = NULL;
8644 uint16_t rpi = 0, vpi = 0;
8645 struct lpfc_vport *vport = NULL;
8646
8647 /* The rpi in the ulpContext is vport-sensitive. */
8648 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
8649 rpi = iocbq->iocb.ulpContext;
8650
8651 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8652 "3092 Port generated ABTS async event "
8653 "on vpi %d rpi %d status 0x%x\n",
8654 vpi, rpi, iocbq->iocb.ulpStatus);
8655
8656 vport = lpfc_find_vport_by_vpid(phba, vpi);
8657 if (!vport)
8658 goto err_exit;
8659 ndlp = lpfc_findnode_rpi(vport, rpi);
8660 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
8661 goto err_exit;
8662
8663 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
8664 lpfc_sli_abts_recover_port(vport, ndlp);
8665 return;
8666
8667 err_exit:
8668 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8669 "3095 Event Context not found, no "
8670 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
8671 vpi, rpi, iocbq->iocb.ulpStatus,
8672 iocbq->iocb.ulpContext);
8673}
8674
8675/* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
8676 * @phba: pointer to HBA context object.
8677 * @ndlp: nodelist pointer for the impacted rport.
8678 * @axri: pointer to the wcqe containing the failed exchange.
8679 *
8680 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
8681 * port. The port generates this event when an abort exchange request to an
8682 * rport fails twice in succession with no reply. The abort could have been originated
8683 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
8684 */
8685void
8686lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
8687 struct lpfc_nodelist *ndlp,
8688 struct sli4_wcqe_xri_aborted *axri)
8689{
8690 struct lpfc_vport *vport;
5c1db2ac 8691 uint32_t ext_status = 0;
cb69f7de 8692
6b5151fd 8693 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
cb69f7de
JS
8694 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8695 "3115 Node Context not found, driver "
8696 "ignoring abts err event\n");
6b5151fd
JS
8697 return;
8698 }
8699
cb69f7de
JS
8700 vport = ndlp->vport;
8701 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8702 "3116 Port generated FCP XRI ABORT event on "
5c1db2ac 8703 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
cb69f7de
JS
8704 ndlp->vport->vpi, ndlp->nlp_rpi,
8705 bf_get(lpfc_wcqe_xa_xri, axri),
5c1db2ac
JS
8706 bf_get(lpfc_wcqe_xa_status, axri),
8707 axri->parameter);
cb69f7de 8708
5c1db2ac
JS
8709 /*
8710 * Catch the ABTS protocol failure case. Older OCe FW releases returned
8711 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
8712 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
8713 */
e3d2b802 8714 ext_status = axri->parameter & IOERR_PARAM_MASK;
5c1db2ac
JS
8715 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
8716 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
cb69f7de
JS
8717 lpfc_sli_abts_recover_port(vport, ndlp);
8718}
8719
e59058c4 8720/**
3621a710 8721 * lpfc_sli_async_event_handler - ASYNC iocb handler function
e59058c4
JS
8722 * @phba: Pointer to HBA context object.
8723 * @pring: Pointer to driver SLI ring object.
8724 * @iocbq: Pointer to iocb object.
8725 *
8726 * This function is called by the slow ring event handler
8727 * function when there is an ASYNC event iocb in the ring.
8728 * This function is called with no lock held.
8729 * Currently this function handles only temperature related
8730 * ASYNC events. The function decodes the temperature sensor
8731 * event message and posts events for the management applications.
8732 **/
98c9ea5c 8733static void
57127f15
JS
8734lpfc_sli_async_event_handler(struct lpfc_hba * phba,
8735 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
8736{
8737 IOCB_t *icmd;
8738 uint16_t evt_code;
57127f15
JS
8739 struct temp_event temp_event_data;
8740 struct Scsi_Host *shost;
a257bf90 8741 uint32_t *iocb_w;
57127f15
JS
8742
8743 icmd = &iocbq->iocb;
8744 evt_code = icmd->un.asyncstat.evt_code;
57127f15 8745
cb69f7de
JS
8746 switch (evt_code) {
8747 case ASYNC_TEMP_WARN:
8748 case ASYNC_TEMP_SAFE:
8749 temp_event_data.data = (uint32_t) icmd->ulpContext;
8750 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
8751 if (evt_code == ASYNC_TEMP_WARN) {
8752 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
8753 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
8754 "0347 Adapter is very hot, please take "
8755 "corrective action. temperature : %d Celsius\n",
8756 (uint32_t) icmd->ulpContext);
8757 } else {
8758 temp_event_data.event_code = LPFC_NORMAL_TEMP;
8759 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
8760 "0340 Adapter temperature is OK now. "
8761 "temperature : %d Celsius\n",
8762 (uint32_t) icmd->ulpContext);
8763 }
8764
8765 /* Send temperature change event to applications */
8766 shost = lpfc_shost_from_vport(phba->pport);
8767 fc_host_post_vendor_event(shost, fc_get_event_number(),
8768 sizeof(temp_event_data), (char *) &temp_event_data,
8769 LPFC_NL_VENDOR_ID);
8770 break;
8771 case ASYNC_STATUS_CN:
8772 lpfc_sli_abts_err_handler(phba, iocbq);
8773 break;
8774 default:
a257bf90 8775 iocb_w = (uint32_t *) icmd;
cb69f7de 8776 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
76bb24ef 8777 "0346 Ring %d handler: unexpected ASYNC_STATUS"
e4e74273 8778 " evt_code 0x%x\n"
a257bf90
JS
8779 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
8780 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
8781 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
8782 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
cb69f7de 8783 pring->ringno, icmd->un.asyncstat.evt_code,
a257bf90
JS
8784 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
8785 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
8786 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
8787 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
8788
cb69f7de 8789 break;
57127f15 8790 }
57127f15
JS
8791}
8792
8793
e59058c4 8794/**
3621a710 8795 * lpfc_sli_setup - SLI ring setup function
e59058c4
JS
8796 * @phba: Pointer to HBA context object.
8797 *
8798 * lpfc_sli_setup sets up rings of the SLI interface with
8799 * the number of iocbs per ring and iotags. This function is
8800 * called while the driver attaches to the HBA and before the
8801 * interrupts are enabled. So there is no need for locking.
8802 *
8803 * This function always returns 0.
8804 **/
dea3101e 8805int
8806lpfc_sli_setup(struct lpfc_hba *phba)
8807{
ed957684 8808 int i, totiocbsize = 0;
dea3101e 8809 struct lpfc_sli *psli = &phba->sli;
8810 struct lpfc_sli_ring *pring;
8811
2a76a283
JS
8812 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
8813 if (phba->sli_rev == LPFC_SLI_REV4)
67d12733 8814 psli->num_rings += phba->cfg_fcp_io_channel;
dea3101e 8815 psli->sli_flag = 0;
8816 psli->fcp_ring = LPFC_FCP_RING;
8817 psli->next_ring = LPFC_FCP_NEXT_RING;
a4bc3379 8818 psli->extra_ring = LPFC_EXTRA_RING;
dea3101e 8819
604a3e30
JB
8820 psli->iocbq_lookup = NULL;
8821 psli->iocbq_lookup_len = 0;
8822 psli->last_iotag = 0;
8823
dea3101e 8824 for (i = 0; i < psli->num_rings; i++) {
8825 pring = &psli->ring[i];
8826 switch (i) {
8827 case LPFC_FCP_RING: /* ring 0 - FCP */
8828 /* numCiocb and numRiocb are used in config_port */
7e56aa25
JS
8829 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
8830 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
8831 pring->sli.sli3.numCiocb +=
8832 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
8833 pring->sli.sli3.numRiocb +=
8834 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
8835 pring->sli.sli3.numCiocb +=
8836 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
8837 pring->sli.sli3.numRiocb +=
8838 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
8839 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
8840 SLI3_IOCB_CMD_SIZE :
8841 SLI2_IOCB_CMD_SIZE;
7e56aa25 8842 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
8843 SLI3_IOCB_RSP_SIZE :
8844 SLI2_IOCB_RSP_SIZE;
dea3101e 8845 pring->iotag_ctr = 0;
8846 pring->iotag_max =
92d7f7b0 8847 (phba->cfg_hba_queue_depth * 2);
dea3101e 8848 pring->fast_iotag = pring->iotag_max;
8849 pring->num_mask = 0;
8850 break;
a4bc3379 8851 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
dea3101e 8852 /* numCiocb and numRiocb are used in config_port */
7e56aa25
JS
8853 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
8854 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
8855 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
8856 SLI3_IOCB_CMD_SIZE :
8857 SLI2_IOCB_CMD_SIZE;
7e56aa25 8858 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
8859 SLI3_IOCB_RSP_SIZE :
8860 SLI2_IOCB_RSP_SIZE;
2e0fef85 8861 pring->iotag_max = phba->cfg_hba_queue_depth;
dea3101e 8862 pring->num_mask = 0;
8863 break;
8864 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
8865 /* numCiocb and numRiocb are used in config_port */
7e56aa25
JS
8866 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
8867 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
8868 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
8869 SLI3_IOCB_CMD_SIZE :
8870 SLI2_IOCB_CMD_SIZE;
7e56aa25 8871 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
8872 SLI3_IOCB_RSP_SIZE :
8873 SLI2_IOCB_RSP_SIZE;
dea3101e 8874 pring->fast_iotag = 0;
8875 pring->iotag_ctr = 0;
8876 pring->iotag_max = 4096;
57127f15
JS
8877 pring->lpfc_sli_rcv_async_status =
8878 lpfc_sli_async_event_handler;
6669f9bb 8879 pring->num_mask = LPFC_MAX_RING_MASK;
dea3101e 8880 pring->prt[0].profile = 0; /* Mask 0 */
6a9c52cf
JS
8881 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
8882 pring->prt[0].type = FC_TYPE_ELS;
dea3101e 8883 pring->prt[0].lpfc_sli_rcv_unsol_event =
92d7f7b0 8884 lpfc_els_unsol_event;
dea3101e 8885 pring->prt[1].profile = 0; /* Mask 1 */
6a9c52cf
JS
8886 pring->prt[1].rctl = FC_RCTL_ELS_REP;
8887 pring->prt[1].type = FC_TYPE_ELS;
dea3101e 8888 pring->prt[1].lpfc_sli_rcv_unsol_event =
92d7f7b0 8889 lpfc_els_unsol_event;
dea3101e 8890 pring->prt[2].profile = 0; /* Mask 2 */
8891 /* NameServer Inquiry */
6a9c52cf 8892 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
dea3101e 8893 /* NameServer */
6a9c52cf 8894 pring->prt[2].type = FC_TYPE_CT;
dea3101e 8895 pring->prt[2].lpfc_sli_rcv_unsol_event =
92d7f7b0 8896 lpfc_ct_unsol_event;
dea3101e 8897 pring->prt[3].profile = 0; /* Mask 3 */
8898 /* NameServer response */
6a9c52cf 8899 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
dea3101e 8900 /* NameServer */
6a9c52cf 8901 pring->prt[3].type = FC_TYPE_CT;
dea3101e 8902 pring->prt[3].lpfc_sli_rcv_unsol_event =
92d7f7b0 8903 lpfc_ct_unsol_event;
dea3101e 8904 break;
8905 }
7e56aa25
JS
8906 totiocbsize += (pring->sli.sli3.numCiocb *
8907 pring->sli.sli3.sizeCiocb) +
8908 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
dea3101e 8909 }
ed957684 8910 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
dea3101e 8911 /* Too many cmd / rsp ring entries in SLI2 SLIM */
e8b62011
JS
8912 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
8913 "SLI2 SLIM Data: x%x x%lx\n",
8914 phba->brd_no, totiocbsize,
8915 (unsigned long) MAX_SLIM_IOCB_SIZE);
dea3101e 8916 }
cf5bf97e
JW
8917 if (phba->cfg_multi_ring_support == 2)
8918 lpfc_extra_ring_setup(phba);
dea3101e 8919
8920 return 0;
8921}
8922
e59058c4 8923/**
3621a710 8924 * lpfc_sli_queue_setup - Queue initialization function
e59058c4
JS
8925 * @phba: Pointer to HBA context object.
8926 *
8927 * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each
8928 * ring. This function also initializes ring indices of each ring.
8929 * This function is called during the initialization of the SLI
8930 * interface of an HBA.
8931 * This function is called with no lock held and always returns
8932 * 1.
8933 **/
dea3101e 8934int
2e0fef85 8935lpfc_sli_queue_setup(struct lpfc_hba *phba)
dea3101e 8936{
8937 struct lpfc_sli *psli;
8938 struct lpfc_sli_ring *pring;
604a3e30 8939 int i;
dea3101e 8940
8941 psli = &phba->sli;
2e0fef85 8942 spin_lock_irq(&phba->hbalock);
dea3101e 8943 INIT_LIST_HEAD(&psli->mboxq);
92d7f7b0 8944 INIT_LIST_HEAD(&psli->mboxq_cmpl);
dea3101e 8945 /* Initialize list headers for txq and txcmplq as doubly linked lists */
8946 for (i = 0; i < psli->num_rings; i++) {
8947 pring = &psli->ring[i];
8948 pring->ringno = i;
7e56aa25
JS
8949 pring->sli.sli3.next_cmdidx = 0;
8950 pring->sli.sli3.local_getidx = 0;
8951 pring->sli.sli3.cmdidx = 0;
dea3101e 8952 INIT_LIST_HEAD(&pring->txq);
8953 INIT_LIST_HEAD(&pring->txcmplq);
8954 INIT_LIST_HEAD(&pring->iocb_continueq);
9c2face6 8955 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
dea3101e 8956 INIT_LIST_HEAD(&pring->postbufq);
7e56aa25 8957 spin_lock_init(&pring->ring_lock);
dea3101e 8958 }
2e0fef85
JS
8959 spin_unlock_irq(&phba->hbalock);
8960 return 1;
dea3101e 8961}
8962
04c68496
JS
8963/**
8964 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
8965 * @phba: Pointer to HBA context object.
8966 *
8967 * This routine flushes the mailbox command subsystem. It will unconditionally
8968 * flush all the mailbox commands in the three possible stages in the mailbox
8969 * command sub-system: pending mailbox command queue; the outstanding mailbox
8970 * command; and the completed mailbox command queue. It is the caller's responsibility
8971 * to make sure that the driver is in the proper state to flush the mailbox
8972 * command sub-system. Namely, the posting of mailbox commands into the
8973 * pending mailbox command queue from the various clients must be stopped;
8974 * either the HBA is in a state in which it will never work on the outstanding
8975 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
8976 * mailbox command has been completed.
8977 **/
8978static void
8979lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
8980{
8981 LIST_HEAD(completions);
8982 struct lpfc_sli *psli = &phba->sli;
8983 LPFC_MBOXQ_t *pmb;
8984 unsigned long iflag;
8985
8986 /* Flush all the mailbox commands in the mbox system */
8987 spin_lock_irqsave(&phba->hbalock, iflag);
8988 /* The pending mailbox command queue */
8989 list_splice_init(&phba->sli.mboxq, &completions);
8990 /* The outstanding active mailbox command */
8991 if (psli->mbox_active) {
8992 list_add_tail(&psli->mbox_active->list, &completions);
8993 psli->mbox_active = NULL;
8994 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8995 }
8996 /* The completed mailbox command queue */
8997 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
8998 spin_unlock_irqrestore(&phba->hbalock, iflag);
8999
9000 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
9001 while (!list_empty(&completions)) {
9002 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
9003 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
9004 if (pmb->mbox_cmpl)
9005 pmb->mbox_cmpl(phba, pmb);
9006 }
9007}
9008
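/*
 * The flush above is an instance of the splice-then-complete pattern:
 * entries are moved to a private list while the lock is held, and the
 * completion handlers are invoked only after the lock is dropped, so a
 * handler may safely re-acquire hbalock or post new mailbox commands.
 * A condensed restatement of the pattern, for illustration only:
 */
#if 0	/* example only */
	spin_lock_irqsave(&phba->hbalock, iflag);
	list_splice_init(&phba->sli.mboxq, &completions);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	while (!list_empty(&completions)) {
		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);	/* lock not held */
	}
#endif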
e59058c4 9009/**
3621a710 9010 * lpfc_sli_host_down - Vport cleanup function
e59058c4
JS
9011 * @vport: Pointer to virtual port object.
9012 *
9013 * lpfc_sli_host_down is called to clean up the resources
9014 * associated with a vport before destroying virtual
9015 * port data structures.
9016 * This function does the following operations:
9017 * - Free discovery resources associated with this virtual
9018 * port.
9019 * - Free iocbs associated with this virtual port in
9020 * the txq.
9021 * - Send abort for all iocb commands associated with this
9022 * vport in txcmplq.
9023 *
9024 * This function is called with no lock held and always returns 1.
9025 **/
92d7f7b0
JS
9026int
9027lpfc_sli_host_down(struct lpfc_vport *vport)
9028{
858c9f6c 9029 LIST_HEAD(completions);
92d7f7b0
JS
9030 struct lpfc_hba *phba = vport->phba;
9031 struct lpfc_sli *psli = &phba->sli;
9032 struct lpfc_sli_ring *pring;
9033 struct lpfc_iocbq *iocb, *next_iocb;
92d7f7b0
JS
9034 int i;
9035 unsigned long flags = 0;
9036 uint16_t prev_pring_flag;
9037
9038 lpfc_cleanup_discovery_resources(vport);
9039
9040 spin_lock_irqsave(&phba->hbalock, flags);
92d7f7b0
JS
9041 for (i = 0; i < psli->num_rings; i++) {
9042 pring = &psli->ring[i];
9043 prev_pring_flag = pring->flag;
5e9d9b82
JS
9044 /* Only slow rings */
9045 if (pring->ringno == LPFC_ELS_RING) {
858c9f6c 9046 pring->flag |= LPFC_DEFERRED_RING_EVENT;
5e9d9b82
JS
9047 /* Set the lpfc data pending flag */
9048 set_bit(LPFC_DATA_READY, &phba->data_flags);
9049 }
92d7f7b0
JS
9050 /*
9051 * Error everything on the txq since these iocbs have not been
9052 * given to the FW yet.
9053 */
92d7f7b0
JS
9054 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
9055 if (iocb->vport != vport)
9056 continue;
858c9f6c 9057 list_move_tail(&iocb->list, &completions);
92d7f7b0 9058 pring->txq_cnt--;
92d7f7b0
JS
9059 }
9060
9061 /* Next issue ABTS for everything on the txcmplq */
9062 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
9063 list) {
9064 if (iocb->vport != vport)
9065 continue;
9066 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
9067 }
9068
9069 pring->flag = prev_pring_flag;
9070 }
9071
9072 spin_unlock_irqrestore(&phba->hbalock, flags);
9073
a257bf90
JS
9074 /* Cancel all the IOCBs from the completions list */
9075 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9076 IOERR_SLI_DOWN);
92d7f7b0
JS
9077 return 1;
9078}
9079
e59058c4 9080/**
3621a710 9081 * lpfc_sli_hba_down - Resource cleanup function for the HBA
e59058c4
JS
9082 * @phba: Pointer to HBA context object.
9083 *
9084 * This function cleans up all iocb, buffers, mailbox commands
9085 * while shutting down the HBA. This function is called with no
9086 * lock held and always returns 1.
9087 * This function does the following to cleanup driver resources:
9088 * - Free discovery resources for each virtual port
9089 * - Cleanup any pending fabric iocbs
9090 * - Iterate through the iocb txq and free each entry
9091 * in the list.
9092 * - Free up any buffer posted to the HBA
9093 * - Free mailbox commands in the mailbox queue.
9094 **/
dea3101e 9095int
2e0fef85 9096lpfc_sli_hba_down(struct lpfc_hba *phba)
dea3101e 9097{
2534ba75 9098 LIST_HEAD(completions);
2e0fef85 9099 struct lpfc_sli *psli = &phba->sli;
dea3101e 9100 struct lpfc_sli_ring *pring;
0ff10d46 9101 struct lpfc_dmabuf *buf_ptr;
dea3101e 9102 unsigned long flags = 0;
04c68496
JS
9103 int i;
9104
9105 /* Shutdown the mailbox command sub-system */
618a5230 9106 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
dea3101e 9107
dea3101e 9108 lpfc_hba_down_prep(phba);
9109
92d7f7b0
JS
9110 lpfc_fabric_abort_hba(phba);
9111
2e0fef85 9112 spin_lock_irqsave(&phba->hbalock, flags);
dea3101e 9113 for (i = 0; i < psli->num_rings; i++) {
9114 pring = &psli->ring[i];
5e9d9b82
JS
9115 /* Only slow rings */
9116 if (pring->ringno == LPFC_ELS_RING) {
858c9f6c 9117 pring->flag |= LPFC_DEFERRED_RING_EVENT;
5e9d9b82
JS
9118 /* Set the lpfc data pending flag */
9119 set_bit(LPFC_DATA_READY, &phba->data_flags);
9120 }
dea3101e 9121
9122 /*
9123 * Error everything on the txq since these iocbs have not been
9124 * given to the FW yet.
9125 */
2534ba75 9126 list_splice_init(&pring->txq, &completions);
dea3101e 9127 pring->txq_cnt = 0;
9128
2534ba75 9129 }
2e0fef85 9130 spin_unlock_irqrestore(&phba->hbalock, flags);
dea3101e 9131
a257bf90
JS
9132 /* Cancel all the IOCBs from the completions list */
9133 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9134 IOERR_SLI_DOWN);
dea3101e 9135
0ff10d46
JS
9136 spin_lock_irqsave(&phba->hbalock, flags);
9137 list_splice_init(&phba->elsbuf, &completions);
9138 phba->elsbuf_cnt = 0;
9139 phba->elsbuf_prev_cnt = 0;
9140 spin_unlock_irqrestore(&phba->hbalock, flags);
9141
9142 while (!list_empty(&completions)) {
9143 list_remove_head(&completions, buf_ptr,
9144 struct lpfc_dmabuf, list);
9145 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
9146 kfree(buf_ptr);
9147 }
9148
dea3101e 9149 /* Return any active mbox cmds */
9150 del_timer_sync(&psli->mbox_tmo);
2e0fef85 9151
da0436e9 9152 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
2e0fef85 9153 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
da0436e9 9154 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
2e0fef85 9155
da0436e9
JS
9156 return 1;
9157}
9158
e59058c4 9159/**
3621a710 9160 * lpfc_sli_pcimem_bcopy - SLI memory copy function
e59058c4
JS
9161 * @srcp: Source memory pointer.
9162 * @destp: Destination memory pointer.
9163 * @cnt: Number of bytes to be copied (a multiple of the word size).
9164 *
9165 * This function is used for copying data between driver memory
9166 * and the SLI memory. This function also changes the endianness
9167 * of each word if native endianness is different from SLI
9168 * endianness. This function can be called with or without
9169 * lock.
9170 **/
dea3101e 9171void
9172lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
9173{
9174 uint32_t *src = srcp;
9175 uint32_t *dest = destp;
9176 uint32_t ldata;
9177 int i;
9178
9179 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
9180 ldata = *src;
9181 ldata = le32_to_cpu(ldata);
9182 *dest = ldata;
9183 src++;
9184 dest++;
9185 }
9186}
9187
e59058c4 9188
a0c87cbd
JS
9189/**
9190 * lpfc_sli_bemem_bcopy - SLI memory copy function
9191 * @srcp: Source memory pointer.
9192 * @destp: Destination memory pointer.
9193 * @cnt: Number of bytes to be copied (a multiple of the word size).
9194 *
9195 * This function is used for copying data between a data structure
9196 * with big endian representation to local endianness.
9197 * This function can be called with or without lock.
9198 **/
9199void
9200lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
9201{
9202 uint32_t *src = srcp;
9203 uint32_t *dest = destp;
9204 uint32_t ldata;
9205 int i;
9206
9207 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
9208 ldata = *src;
9209 ldata = be32_to_cpu(ldata);
9210 *dest = ldata;
9211 src++;
9212 dest++;
9213 }
9214}
9215
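/*
 * Worked example (values illustrative): cnt is a byte count, so copying
 * 8 bytes moves two 32-bit words. On a little-endian host the pcimem
 * variant leaves the words unchanged (le32_to_cpu is a no-op there)
 * while the bemem variant byte-swaps each word.
 */
#if 0	/* example only */
	uint32_t src[2] = { 0x11223344, 0x55667788 };
	uint32_t dst[2];

	lpfc_sli_bemem_bcopy(src, dst, sizeof(src));
	/* little-endian host: dst[0] == 0x44332211, dst[1] == 0x88776655 */
#endif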
e59058c4 9216/**
3621a710 9217 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
e59058c4
JS
9218 * @phba: Pointer to HBA context object.
9219 * @pring: Pointer to driver SLI ring object.
9220 * @mp: Pointer to driver buffer object.
9221 *
9222 * This function is called with no lock held.
9223 * It always returns zero after adding the buffer to the postbufq
9224 * buffer list.
9225 **/
dea3101e 9226int
2e0fef85
JS
9227lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9228 struct lpfc_dmabuf *mp)
dea3101e 9229{
9230 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
9231 later */
2e0fef85 9232 spin_lock_irq(&phba->hbalock);
dea3101e 9233 list_add_tail(&mp->list, &pring->postbufq);
dea3101e 9234 pring->postbufq_cnt++;
2e0fef85 9235 spin_unlock_irq(&phba->hbalock);
dea3101e 9236 return 0;
9237}
9238
e59058c4 9239/**
3621a710 9240 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
e59058c4
JS
9241 * @phba: Pointer to HBA context object.
9242 *
9243 * When HBQ is enabled, buffers are searched based on tags. This function
9244 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
9245 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
9246 * does not conflict with tags of buffer posted for unsolicited events.
9247 * The function returns the allocated tag. The function is called with
9248 * no locks held.
9249 **/
76bb24ef
JS
9250uint32_t
9251lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
9252{
9253 spin_lock_irq(&phba->hbalock);
9254 phba->buffer_tag_count++;
9255 /*
9256 * Always set the QUE_BUFTAG_BIT to distinguish this tag from
9257 * a tag assigned by HBQ.
9258 */
9259 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
9260 spin_unlock_irq(&phba->hbalock);
9261 return phba->buffer_tag_count;
9262}
9263
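/*
 * Hypothetical helper sketch (example_is_que_buftag is not a driver
 * function): every tag handed out above has QUE_BUFTAG_BIT or-ed in,
 * so a completion path can tell such tags apart from HBQ buffer tags
 * with a single mask test.
 */
#if 0	/* example only */
static inline bool example_is_que_buftag(uint32_t tag)
{
	return (tag & QUE_BUFTAG_BIT) != 0;
}
#endif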
e59058c4 9264/**
3621a710 9265 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
e59058c4
JS
9266 * @phba: Pointer to HBA context object.
9267 * @pring: Pointer to driver SLI ring object.
9268 * @tag: Buffer tag.
9269 *
9270 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
9271 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
9272 * iocb is posted to the response ring with the tag of the buffer.
9273 * This function searches the pring->postbufq list using the tag
9274 * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
9275 * iocb. If the buffer is found then the lpfc_dmabuf object of the
9276 * buffer is returned to the caller, else NULL is returned.
9277 * This function is called with no lock held.
9278 **/
76bb24ef
JS
9279struct lpfc_dmabuf *
9280lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9281 uint32_t tag)
9282{
9283 struct lpfc_dmabuf *mp, *next_mp;
9284 struct list_head *slp = &pring->postbufq;
9285
25985edc 9286 /* Search postbufq, from the beginning, looking for a match on tag */
76bb24ef
JS
9287 spin_lock_irq(&phba->hbalock);
9288 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
9289 if (mp->buffer_tag == tag) {
9290 list_del_init(&mp->list);
9291 pring->postbufq_cnt--;
9292 spin_unlock_irq(&phba->hbalock);
9293 return mp;
9294 }
9295 }
9296
9297 spin_unlock_irq(&phba->hbalock);
9298 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
d7c255b2 9299 "0402 Cannot find virtual addr for buffer tag on "
76bb24ef
JS
9300 "ring %d Data x%lx x%p x%p x%x\n",
9301 pring->ringno, (unsigned long) tag,
9302 slp->next, slp->prev, pring->postbufq_cnt);
9303
9304 return NULL;
9305}
dea3101e 9306
e59058c4 9307/**
3621a710 9308 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
e59058c4
JS
9309 * @phba: Pointer to HBA context object.
9310 * @pring: Pointer to driver SLI ring object.
9311 * @phys: DMA address of the buffer.
9312 *
9313 * This function searches the buffer list using the dma_address
9314 * of the unsolicited event to find the driver's lpfc_dmabuf object
9315 * corresponding to the dma_address. The function returns the
9316 * lpfc_dmabuf object if a buffer is found else it returns NULL.
9317 * This function is called by the ct and els unsolicited event
9318 * handlers to get the buffer associated with the unsolicited
9319 * event.
9320 *
9321 * This function is called with no lock held.
9322 **/
dea3101e 9323struct lpfc_dmabuf *
9324lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9325 dma_addr_t phys)
9326{
9327 struct lpfc_dmabuf *mp, *next_mp;
9328 struct list_head *slp = &pring->postbufq;
9329
25985edc 9330 /* Search postbufq, from the beginning, looking for a match on phys */
2e0fef85 9331 spin_lock_irq(&phba->hbalock);
dea3101e 9332 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
9333 if (mp->phys == phys) {
9334 list_del_init(&mp->list);
9335 pring->postbufq_cnt--;
2e0fef85 9336 spin_unlock_irq(&phba->hbalock);
dea3101e 9337 return mp;
9338 }
9339 }
9340
2e0fef85 9341 spin_unlock_irq(&phba->hbalock);
dea3101e 9342 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 9343 "0410 Cannot find virtual addr for mapped buf on "
dea3101e 9344 "ring %d Data x%llx x%p x%p x%x\n",
e8b62011 9345 pring->ringno, (unsigned long long)phys,
dea3101e 9346 slp->next, slp->prev, pring->postbufq_cnt);
9347 return NULL;
9348}
9349
e59058c4 9350/**
3621a710 9351 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
e59058c4
JS
9352 * @phba: Pointer to HBA context object.
9353 * @cmdiocb: Pointer to driver command iocb object.
9354 * @rspiocb: Pointer to driver response iocb object.
9355 *
9356 * This function is the completion handler for the abort iocbs for
9357 * ELS commands. This function is called from the ELS ring event
9358 * handler with no lock held. This function frees memory resources
9359 * associated with the abort iocb.
9360 **/
dea3101e 9361static void
2e0fef85
JS
9362lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9363 struct lpfc_iocbq *rspiocb)
dea3101e 9364{
2e0fef85 9365 IOCB_t *irsp = &rspiocb->iocb;
2680eeaa 9366 uint16_t abort_iotag, abort_context;
ff78d8f9 9367 struct lpfc_iocbq *abort_iocb = NULL;
2680eeaa
JS
9368
9369 if (irsp->ulpStatus) {
ff78d8f9
JS
9370
9371 /*
9372 * Assume that the port already completed and returned, or
9373 * will return the iocb. Just log the message.
9374 */
2680eeaa
JS
9375 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
9376 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
9377
2e0fef85 9378 spin_lock_irq(&phba->hbalock);
45ed1190
JS
9379 if (phba->sli_rev < LPFC_SLI_REV4) {
9380 if (abort_iotag != 0 &&
9381 abort_iotag <= phba->sli.last_iotag)
9382 abort_iocb =
9383 phba->sli.iocbq_lookup[abort_iotag];
9384 } else
9385 /* For sli4 the abort_tag is the XRI,
9386 * so the abort routine puts the iotag of the iocb
9387 * being aborted in the context field of the abort
9388 * IOCB.
9389 */
9390 abort_iocb = phba->sli.iocbq_lookup[abort_context];
2680eeaa 9391
2a9bf3d0
JS
9392 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
9393 "0327 Cannot abort els iocb %p "
9394 "with tag %x context %x, abort status %x, "
9395 "abort code %x\n",
9396 abort_iocb, abort_iotag, abort_context,
9397 irsp->ulpStatus, irsp->un.ulpWord[4]);
341af102 9398
ff78d8f9 9399 spin_unlock_irq(&phba->hbalock);
2680eeaa 9400 }
604a3e30 9401 lpfc_sli_release_iocbq(phba, cmdiocb);
dea3101e 9402 return;
9403}
9404
e59058c4 9405/**
3621a710 9406 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
e59058c4
JS
9407 * @phba: Pointer to HBA context object.
9408 * @cmdiocb: Pointer to driver command iocb object.
9409 * @rspiocb: Pointer to driver response iocb object.
9410 *
9411 * The function is called from SLI ring event handler with no
9412 * lock held. This function is the completion handler for ELS commands
9413 * which are aborted. The function frees memory resources used for
9414 * the aborted ELS commands.
9415 **/
92d7f7b0
JS
9416static void
9417lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9418 struct lpfc_iocbq *rspiocb)
9419{
9420 IOCB_t *irsp = &rspiocb->iocb;
9421
9422 /* ELS cmd tag <ulpIoTag> completes */
9423 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
d7c255b2 9424 "0139 Ignoring ELS cmd tag x%x completion Data: "
92d7f7b0 9425 "x%x x%x x%x\n",
e8b62011 9426 irsp->ulpIoTag, irsp->ulpStatus,
92d7f7b0 9427 irsp->un.ulpWord[4], irsp->ulpTimeout);
858c9f6c
JS
9428 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
9429 lpfc_ct_free_iocb(phba, cmdiocb);
9430 else
9431 lpfc_els_free_iocb(phba, cmdiocb);
92d7f7b0
JS
9432 return;
9433}
9434
e59058c4 9435/**
5af5eee7 9436 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
e59058c4
JS
9437 * @phba: Pointer to HBA context object.
9438 * @pring: Pointer to driver SLI ring object.
9439 * @cmdiocb: Pointer to driver command iocb object.
9440 *
5af5eee7
JS
9441 * This function issues an abort iocb for the provided command iocb down to
9442 * the port. Unless the outstanding command iocb is itself an abort
9443 * request, this function issues the abort unconditionally. This function is
9444 * called with hbalock held. The function returns 0 when it fails due to
9445 * memory allocation failure or when the command iocb is an abort request.
e59058c4 9446 **/
5af5eee7
JS
9447static int
9448lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2e0fef85 9449 struct lpfc_iocbq *cmdiocb)
dea3101e 9450{
2e0fef85 9451 struct lpfc_vport *vport = cmdiocb->vport;
0bd4ca25 9452 struct lpfc_iocbq *abtsiocbp;
dea3101e 9453 IOCB_t *icmd = NULL;
9454 IOCB_t *iabt = NULL;
5af5eee7 9455 int retval;
7e56aa25 9456 unsigned long iflags;
07951076 9457
92d7f7b0
JS
9458 /*
9459 * There are certain command types we don't want to abort. And we
9460 * don't want to abort commands that are already in the process of
9461 * being aborted.
07951076
JS
9462 */
9463 icmd = &cmdiocb->iocb;
2e0fef85 9464 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
92d7f7b0
JS
9465 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
9466 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
07951076
JS
9467 return 0;
9468
dea3101e 9469 /* issue ABTS for this IOCB based on iotag */
92d7f7b0 9470 abtsiocbp = __lpfc_sli_get_iocbq(phba);
dea3101e 9471 if (abtsiocbp == NULL)
9472 return 0;
dea3101e 9473
07951076 9474 /* This signals the response to set the correct status
341af102 9475 * before calling the completion handler
07951076
JS
9476 */
9477 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
9478
dea3101e 9479 iabt = &abtsiocbp->iocb;
07951076
JS
9480 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
9481 iabt->un.acxri.abortContextTag = icmd->ulpContext;
45ed1190 9482 if (phba->sli_rev == LPFC_SLI_REV4) {
da0436e9 9483 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
45ed1190
JS
9484 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
9485 }
da0436e9
JS
9486 else
9487 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
07951076
JS
9488 iabt->ulpLe = 1;
9489 iabt->ulpClass = icmd->ulpClass;
dea3101e 9490
5ffc266e
JS
9491 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
9492 abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
341af102
JS
9493 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
9494 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
5ffc266e 9495
2e0fef85 9496 if (phba->link_state >= LPFC_LINK_UP)
07951076
JS
9497 iabt->ulpCommand = CMD_ABORT_XRI_CN;
9498 else
9499 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
dea3101e 9500
07951076 9501 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
5b8bd0c9 9502
e8b62011
JS
9503 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
9504 "0339 Abort xri x%x, original iotag x%x, "
9505 "abort cmd iotag x%x\n",
2a9bf3d0 9506 iabt->un.acxri.abortIoTag,
e8b62011 9507 iabt->un.acxri.abortContextTag,
2a9bf3d0 9508 abtsiocbp->iotag);
7e56aa25
JS
9509
9510 if (phba->sli_rev == LPFC_SLI_REV4) {
9511 /* Note: both hbalock and ring_lock need to be set here */
9512 spin_lock_irqsave(&pring->ring_lock, iflags);
9513 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
9514 abtsiocbp, 0);
9515 spin_unlock_irqrestore(&pring->ring_lock, iflags);
9516 } else {
9517 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
9518 abtsiocbp, 0);
9519 }
dea3101e 9520
d7c255b2
JS
9521 if (retval)
9522 __lpfc_sli_release_iocbq(phba, abtsiocbp);
5af5eee7
JS
9523
9524 /*
9525 * Caller to this routine should check for IOCB_ERROR
9526 * and handle it properly. This routine no longer removes
9527 * iocb off txcmplq and call compl in case of IOCB_ERROR.
9528 */
9529 return retval;
9530}
9531
/**
 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @cmdiocb: Pointer to driver command iocb object.
 *
 * This function issues an abort iocb for the provided command iocb. In case
 * of unloading, the abort iocb will not be issued to commands on the ELS
 * ring. Instead, the callback function is changed for those commands so
 * that nothing happens when they finish. This function is called with
 * hbalock held. The function returns 0 when the command iocb is an abort
 * request.
 **/
int
lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	int retval = IOCB_ERROR;
	IOCB_t *icmd = NULL;

	/*
	 * There are certain command types we don't want to abort. And we
	 * don't want to abort commands that are already in the process of
	 * being aborted.
	 */
	icmd = &cmdiocb->iocb;
	if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
	    icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
		return 0;

	/*
	 * If we're unloading, don't abort iocb on the ELS ring, but change
	 * the callback so that nothing happens when it finishes.
	 */
	if ((vport->load_flag & FC_UNLOADING) &&
	    (pring->ringno == LPFC_ELS_RING)) {
		if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
			cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
		else
			cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
		goto abort_iotag_exit;
	}

	/* Now, we try to issue the abort to the cmdiocb out */
	retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);

abort_iotag_exit:
	/*
	 * Caller to this routine should check for IOCB_ERROR
	 * and handle it properly. This routine no longer removes
	 * the iocb from the txcmplq and calls the completion handler
	 * in case of IOCB_ERROR.
	 */
	return retval;
}
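
/*
 * Illustrative caller sketch (editorial, not part of the driver): the
 * kernel-doc above requires hbalock to be held and the return value to be
 * checked for IOCB_ERROR. A hypothetical caller would therefore look like:
 *
 *	spin_lock_irq(&phba->hbalock);
 *	ret = lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
 *	spin_unlock_irq(&phba->hbalock);
 *	if (ret == IOCB_ERROR)
 *		(abort was not issued; the caller must recover on its own,
 *		 since this routine no longer completes the iocb itself)
 */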

/**
 * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function aborts all iocbs in the given ring and frees all the iocb
 * objects in txq. This function issues abort iocbs unconditionally for all
 * the iocb commands in txcmplq. The iocbs in the txcmplq are not guaranteed
 * to complete before the return of this function. The caller is not required
 * to hold any locks.
 **/
static void
lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	LIST_HEAD(completions);
	struct lpfc_iocbq *iocb, *next_iocb;

	if (pring->ringno == LPFC_ELS_RING)
		lpfc_fabric_abort_hba(phba);

	spin_lock_irq(&phba->hbalock);

	/* Take off all the iocbs on txq for cancelling */
	list_splice_init(&pring->txq, &completions);
	pring->txq_cnt = 0;

	/* Next issue ABTS for everything on the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
		lpfc_sli_abort_iotag_issue(phba, pring, iocb);

	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}

/**
 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will abort all pending and outstanding iocbs to an HBA.
 **/
void
lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	int i;

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		lpfc_sli_iocb_ring_abort(phba, pring);
	}
}

/**
 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
 * @iocbq: Pointer to driver iocb object.
 * @vport: Pointer to driver virtual port object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
 *
 * This function acts as an iocb filter for functions which abort or count
 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
 * 0 if the filtering criteria are met for the given iocb and will return
 * 1 if the filtering criteria are not met.
 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
 * given iocb is for the SCSI device specified by the vport, tgt_id and
 * lun_id parameters.
 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
 * given iocb is for the SCSI target specified by the vport and tgt_id
 * parameters.
 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
 * given iocb is for the SCSI host associated with the given vport.
 * This function is called with no locks held.
 **/
static int
lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
			   uint16_t tgt_id, uint64_t lun_id,
			   lpfc_ctx_cmd ctx_cmd)
{
	struct lpfc_scsi_buf *lpfc_cmd;
	int rc = 1;

	if (!(iocbq->iocb_flag & LPFC_IO_FCP))
		return rc;

	if (iocbq->vport != vport)
		return rc;

	lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);

	if (lpfc_cmd->pCmd == NULL)
		return rc;

	switch (ctx_cmd) {
	case LPFC_CTX_LUN:
		if ((lpfc_cmd->rdata->pnode) &&
		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
		    (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
			rc = 0;
		break;
	case LPFC_CTX_TGT:
		if ((lpfc_cmd->rdata->pnode) &&
		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
			rc = 0;
		break;
	case LPFC_CTX_HOST:
		rc = 0;
		break;
	default:
		printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
		       __func__, ctx_cmd);
		break;
	}

	return rc;
}

/**
 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
 * @vport: Pointer to virtual port.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function returns the number of FCP commands pending for the vport.
 * When ctx_cmd == LPFC_CTX_LUN, the function returns the number of FCP
 * commands pending on the vport associated with the SCSI device specified
 * by the tgt_id and lun_id parameters.
 * When ctx_cmd == LPFC_CTX_TGT, the function returns the number of FCP
 * commands pending on the vport associated with the SCSI target specified
 * by the tgt_id parameter.
 * When ctx_cmd == LPFC_CTX_HOST, the function returns the number of FCP
 * commands pending on the vport.
 * This function returns the number of iocbs which satisfy the filter.
 * This function is called without any lock held.
 **/
int
lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
		  lpfc_ctx_cmd ctx_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	int sum, i;

	for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
					       ctx_cmd) == 0)
			sum++;
	}

	return sum;
}
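
/*
 * Usage sketch (editorial, illustrative only): counting the FCP commands
 * still pending against one LUN of a vport, e.g. from an error handler:
 *
 *	int pending = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN);
 *
 * tgt_id and lun_id are ignored for LPFC_CTX_HOST, which counts every FCP
 * command pending on the vport.
 */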

/**
 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
 * @phba: Pointer to HBA context object
 * @cmdiocb: Pointer to command iocb object.
 * @rspiocb: Pointer to response iocb object.
 *
 * This function is called when an aborted FCP iocb completes. This
 * function is called by the ring event handler with no lock held.
 * This function frees the iocb.
 **/
void
lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3096 ABORT_XRI_CN completing on xri x%x "
			"original iotag x%x, abort cmd iotag x%x "
			"status 0x%x, reason 0x%x\n",
			cmdiocb->iocb.un.acxri.abortContextTag,
			cmdiocb->iocb.un.acxri.abortIoTag,
			cmdiocb->iotag, rspiocb->iocb.ulpStatus,
			rspiocb->iocb.un.ulpWord[4]);
	lpfc_sli_release_iocbq(phba, cmdiocb);
	return;
}

/**
 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
 * @vport: Pointer to virtual port.
 * @pring: Pointer to driver SLI ring object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function sends an abort command for every SCSI command
 * associated with the given virtual port pending on the ring
 * filtered by the lpfc_sli_validate_fcp_iocb function.
 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
 * FCP iocbs associated with the LUN specified by the tgt_id and lun_id
 * parameters.
 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
 * FCP iocbs associated with the SCSI target specified by the tgt_id
 * parameter.
 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
 * FCP iocbs associated with the virtual port.
 * This function returns the number of iocbs it failed to abort.
 * This function is called with no locks held.
 **/
int
lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
		    uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *abtsiocb;
	IOCB_t *cmd = NULL;
	int errcnt = 0, ret_val = 0;
	int i;

	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
					       abort_cmd) != 0)
			continue;

		/* issue ABTS for this IOCB based on iotag */
		abtsiocb = lpfc_sli_get_iocbq(phba);
		if (abtsiocb == NULL) {
			errcnt++;
			continue;
		}

		cmd = &iocbq->iocb;
		abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
		abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
		if (phba->sli_rev == LPFC_SLI_REV4)
			abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
		else
			abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
		abtsiocb->iocb.ulpLe = 1;
		abtsiocb->iocb.ulpClass = cmd->ulpClass;
		abtsiocb->vport = phba->pport;

		/* ABTS WQE must go to the same WQ as the WQE to be aborted */
		abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
		if (iocbq->iocb_flag & LPFC_IO_FCP)
			abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;

		if (lpfc_is_link_up(phba))
			abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
		else
			abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;

		/* Setup callback routine and issue the command. */
		abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
		ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
					      abtsiocb, 0);
		if (ret_val == IOCB_ERROR) {
			lpfc_sli_release_iocbq(phba, abtsiocb);
			errcnt++;
			continue;
		}
	}

	return errcnt;
}
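
/*
 * Usage sketch (editorial, illustrative only): a LUN reset path might abort
 * every outstanding FCP iocb for one LUN and treat a non-zero return as a
 * partial failure:
 *
 *	if (lpfc_sli_abort_iocb(vport, pring, tgt_id, lun_id, LPFC_CTX_LUN))
 *		(some aborts could not be issued; escalate the recovery)
 */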

/**
 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_sli_issue_iocb_wait function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from other threads which
 * clean up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	wait_queue_head_t *pdone_q;
	unsigned long iflags;
	struct lpfc_scsi_buf *lpfc_cmd;

	spin_lock_irqsave(&phba->hbalock, iflags);
	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));

	/* Set the exchange busy flag for task management commands */
	if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
	    !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
					cur_iocbq);
		lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
	}

	pdone_q = cmdiocbq->context_un.wait_queue;
	if (pdone_q)
		wake_up(pdone_q);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return;
}

/**
 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to command iocb.
 * @flag: Flag to test.
 *
 * This routine grabs the hbalock and then tests the iocb_flag to
 * see if the passed in flag is set.
 * Returns:
 * 1 if flag is set.
 * 0 if flag is not set.
 **/
static int
lpfc_chk_iocb_flg(struct lpfc_hba *phba,
		  struct lpfc_iocbq *piocbq, uint32_t flag)
{
	unsigned long iflags;
	int ret;

	spin_lock_irqsave(&phba->hbalock, iflags);
	ret = piocbq->iocb_flag & flag;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return ret;
}

/**
 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue the iocb on.
 * @piocb: Pointer to command iocb.
 * @prspiocbq: Pointer to response iocb.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the iocb to firmware and waits for the
 * iocb to complete. If the iocb command is not
 * completed within timeout seconds, it returns IOCB_TIMEDOUT.
 * Caller should not free the iocb resources if this function
 * returns IOCB_TIMEDOUT.
 * The function waits for the iocb completion using a
 * non-interruptible wait.
 * This function will sleep while waiting for iocb completion.
 * So, this function should not be called from any context which
 * does not allow sleeping. Due to the same reason, this function
 * cannot be called with interrupts disabled.
 * This function assumes that the iocb completions occur while
 * this function sleeps. So, this function cannot be called from
 * the thread which processes iocb completions for this ring.
 * This function clears the iocb_flag of the iocb object before
 * issuing the iocb and the iocb completion handler sets this
 * flag and wakes this thread when the iocb completes.
 * The contents of the response iocb will be copied to prspiocbq
 * by the completion handler when the command completes.
 * This function returns IOCB_SUCCESS on success.
 * This function is called with no lock held.
 **/
int
lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
			 uint32_t ring_number,
			 struct lpfc_iocbq *piocb,
			 struct lpfc_iocbq *prspiocbq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	long timeleft, timeout_req = 0;
	int retval = IOCB_SUCCESS;
	uint32_t creg_val;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	/*
	 * If the caller has provided a response iocbq buffer, then context2
	 * must be NULL or it's an error.
	 */
	if (prspiocbq) {
		if (piocb->context2)
			return IOCB_ERROR;
		piocb->context2 = prspiocbq;
	}

	piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
	piocb->context_un.wait_queue = &done_q;
	piocb->iocb_flag &= ~LPFC_IO_WAKE;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val))
			return IOCB_ERROR;
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
				     SLI_IOCB_RET_IOCB);
	if (retval == IOCB_SUCCESS) {
		timeout_req = timeout * HZ;
		timeleft = wait_event_timeout(done_q,
				lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
				timeout_req);

		if (piocb->iocb_flag & LPFC_IO_WAKE) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"0331 IOCB wake signaled\n");
		} else if (timeleft == 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0338 IOCB wait timeout error - no "
					"wake response Data x%x\n", timeout);
			retval = IOCB_TIMEDOUT;
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0330 IOCB wake NOT set, "
					"Data x%x x%lx\n",
					timeout, (timeleft / jiffies));
			retval = IOCB_TIMEDOUT;
		}
	} else if (retval == IOCB_BUSY) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
			phba->iocb_cnt, pring->txq_cnt, pring->txcmplq_cnt);
		return retval;
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0332 IOCB wait issue failed, Data x%x\n",
				retval);
		retval = IOCB_ERROR;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val))
			return IOCB_ERROR;
		creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (prspiocbq)
		piocb->context2 = NULL;

	piocb->context_un.wait_queue = NULL;
	piocb->iocb_cmpl = NULL;
	return retval;
}
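
/*
 * Usage sketch (editorial, illustrative only): per the kernel-doc above, the
 * one return code that must not be followed by freeing the iocb is
 * IOCB_TIMEDOUT, because the completion handler may still run later:
 *
 *	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
 *				      rspiocbq, timeout);
 *	if (rc == IOCB_SUCCESS)
 *		(inspect rspiocbq->iocb.ulpStatus for the command result)
 *	else if (rc != IOCB_TIMEDOUT)
 *		lpfc_sli_release_iocbq(phba, cmdiocbq);
 */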

/**
 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to driver mailbox object.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the mailbox to firmware and waits for the
 * mailbox command to complete. If the mailbox command is not
 * completed within timeout seconds, it returns MBX_TIMEOUT.
 * The function waits for the mailbox completion using an
 * interruptible wait. If the thread is woken up due to a
 * signal, MBX_TIMEOUT error is returned to the caller. Caller
 * should not free the mailbox resources, if this function returns
 * MBX_TIMEOUT.
 * This function will sleep while waiting for mailbox completion.
 * So, this function should not be called from any context which
 * does not allow sleeping. Due to the same reason, this function
 * cannot be called with interrupts disabled.
 * This function assumes that the mailbox completion occurs while
 * this function sleeps. So, this function cannot be called from
 * the worker thread which processes mailbox completions.
 * This function is called in the context of HBA management
 * applications.
 * This function returns MBX_SUCCESS when successful.
 * This function is called with no lock held.
 **/
int
lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	int retval;
	unsigned long flag;

	/* The caller must leave context1 empty. */
	if (pmboxq->context1)
		return MBX_NOT_FINISHED;

	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
	/* setup wake call as IOCB callback */
	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
	/* setup context field to pass wait_queue pointer to wake function */
	pmboxq->context1 = &done_q;

	/* now issue the command */
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
		wait_event_interruptible_timeout(done_q,
				pmboxq->mbox_flag & LPFC_MBX_WAKE,
				timeout * HZ);

		spin_lock_irqsave(&phba->hbalock, flag);
		pmboxq->context1 = NULL;
		/*
		 * if LPFC_MBX_WAKE flag is set the mailbox is completed
		 * else do not free the resources.
		 */
		if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
			retval = MBX_SUCCESS;
			lpfc_sli4_swap_str(phba, pmboxq);
		} else {
			retval = MBX_TIMEOUT;
			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
		spin_unlock_irqrestore(&phba->hbalock, flag);
	}

	return retval;
}
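
/*
 * Usage sketch (editorial, illustrative only): as with the iocb variant,
 * mailbox resources must not be freed on MBX_TIMEOUT because the default
 * completion handler installed above still owns them:
 *
 *	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
 *	if (rc != MBX_TIMEOUT)
 *		mempool_free(pmboxq, phba->mbox_mem_pool);
 */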

/**
 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
 * @phba: Pointer to HBA context.
 * @mbx_action: LPFC_MBX_WAIT or LPFC_MBX_NO_WAIT.
 *
 * This function is called to shutdown the driver's mailbox sub-system.
 * It first marks the mailbox sub-system as being in a blocked state to
 * prevent any asynchronous mailbox command from being issued off the
 * pending mailbox command queue. If the mailbox command sub-system
 * shutdown is due to HBA error conditions such as EEH or ERATT, this
 * routine shall invoke the mailbox sub-system flush routine to forcefully
 * bring down the mailbox sub-system. Otherwise, if it is due to a normal
 * condition (such as with offline or HBA function reset), this routine
 * will wait for the outstanding mailbox command to complete before
 * invoking the mailbox sub-system flush routine to gracefully bring down
 * the mailbox sub-system.
 **/
void
lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
{
	struct lpfc_sli *psli = &phba->sli;
	unsigned long timeout;

	if (mbx_action == LPFC_MBX_NO_WAIT) {
		/* delay 100ms for port state */
		msleep(100);
		lpfc_sli_mbox_sys_flush(phba);
		return;
	}
	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;

	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
		/* Determine how long we might wait for the active mailbox
		 * command to be gracefully completed by firmware.
		 */
		if (phba->sli.mbox_active)
			timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
						phba->sli.mbox_active) *
						1000) + jiffies;
		spin_unlock_irq(&phba->hbalock);

		while (phba->sli.mbox_active) {
			/* Check active mailbox complete status every 2ms */
			msleep(2);
			if (time_after(jiffies, timeout))
				/* Timeout, let the mailbox flush routine
				 * forcefully release the active mailbox
				 * command
				 */
				break;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_sli_mbox_sys_flush(phba);
}
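
/*
 * Note (editorial): callers select the shutdown mode through mbx_action;
 * error paths such as EEH recovery are expected to pass LPFC_MBX_NO_WAIT
 * so the flush happens immediately, while normal offline paths pass
 * LPFC_MBX_WAIT to let the active mailbox command drain first.
 */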

/**
 * lpfc_sli_eratt_read - read sli-3 error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called to read the SLI3 device error attention registers
 * for possible error attention events. The caller must hold the hostlock
 * with spin_lock_irq().
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
static int
lpfc_sli_eratt_read(struct lpfc_hba *phba)
{
	uint32_t ha_copy;

	/* Read chip Host Attention (HA) register */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		goto unplug_err;

	if (ha_copy & HA_ERATT) {
		/* Read host status register to retrieve error event */
		if (lpfc_sli_read_hs(phba))
			goto unplug_err;

		/* Check if a deferred error condition is active */
		if ((HS_FFER1 & phba->work_hs) &&
		    ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
		      HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
			phba->hba_flag |= DEFER_ERATT;
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr);
		}

		/* Set the driver HA work bitmap */
		phba->work_ha |= HA_ERATT;
		/* Indicate polling handles this ERATT */
		phba->hba_flag |= HBA_ERATT_HANDLED;
		return 1;
	}
	return 0;

unplug_err:
	/* Set the driver HS work bitmap */
	phba->work_hs |= UNPLUG_ERR;
	/* Set the driver HA work bitmap */
	phba->work_ha |= HA_ERATT;
	/* Indicate polling handles this ERATT */
	phba->hba_flag |= HBA_ERATT_HANDLED;
	return 1;
}

/**
 * lpfc_sli4_eratt_read - read sli-4 error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called to read the SLI4 device error attention registers
 * for possible error attention events. The caller must hold the hostlock
 * with spin_lock_irq().
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
static int
lpfc_sli4_eratt_read(struct lpfc_hba *phba)
{
	uint32_t uerr_sta_hi, uerr_sta_lo;
	uint32_t if_type, portsmphr;
	struct lpfc_register portstat_reg;

	/*
	 * For now, use the SLI4 device internal unrecoverable error
	 * registers for error attention. This can be changed later.
	 */
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
			&uerr_sta_lo) ||
		    lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
			&uerr_sta_hi)) {
			phba->work_hs |= UNPLUG_ERR;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
		    (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1423 HBA Unrecoverable error: "
					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
					"ue_mask_lo_reg=0x%x, "
					"ue_mask_hi_reg=0x%x\n",
					uerr_sta_lo, uerr_sta_hi,
					phba->sli4_hba.ue_mask_lo,
					phba->sli4_hba.ue_mask_hi);
			phba->work_status[0] = uerr_sta_lo;
			phba->work_status[1] = uerr_sta_hi;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
			&portstat_reg.word0) ||
		    lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
			&portsmphr)) {
			phba->work_hs |= UNPLUG_ERR;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
			phba->work_status[0] =
				readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
			phba->work_status[1] =
				readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2885 Port Status Event: "
					"port status reg 0x%x, "
					"port smphr reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					portstat_reg.word0,
					portsmphr,
					phba->work_status[0],
					phba->work_status[1]);
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2886 HBA Error Attention on unsupported "
				"if type %d.", if_type);
		return 1;
	}

	return 0;
}

/**
 * lpfc_sli_check_eratt - check error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called from timer soft interrupt context to check HBA's
 * error attention register bit for error attention events.
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
int
lpfc_sli_check_eratt(struct lpfc_hba *phba)
{
	uint32_t ha_copy;

	/* If somebody is waiting to handle an eratt, don't process it
	 * here. The brdkill function will do this.
	 */
	if (phba->link_flag & LS_IGNORE_ERATT)
		return 0;

	/* Check if interrupt handler handles this ERATT */
	spin_lock_irq(&phba->hbalock);
	if (phba->hba_flag & HBA_ERATT_HANDLED) {
		/* Interrupt handler has handled ERATT */
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	/*
	 * If there is deferred error attention, do not check for error
	 * attention
	 */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	/* If PCI channel is offline, don't process it */
	if (unlikely(pci_channel_offline(phba->pcidev))) {
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	switch (phba->sli_rev) {
	case LPFC_SLI_REV2:
	case LPFC_SLI_REV3:
		/* Read chip Host Attention (HA) register */
		ha_copy = lpfc_sli_eratt_read(phba);
		break;
	case LPFC_SLI_REV4:
		/* Read device Unrecoverable Error (UERR) registers */
		ha_copy = lpfc_sli4_eratt_read(phba);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0299 Invalid SLI revision (%d)\n",
				phba->sli_rev);
		ha_copy = 0;
		break;
	}
	spin_unlock_irq(&phba->hbalock);

	return ha_copy;
}
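
/*
 * Note (editorial): HBA_ERATT_HANDLED is the handshake between this polling
 * path and the interrupt handlers below; whichever side sees the error
 * attention first sets the flag under hbalock, and the other side then
 * skips the event so it is processed exactly once.
 */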

/**
 * lpfc_intr_state_check - Check device state for interrupt handling
 * @phba: Pointer to HBA context.
 *
 * This inline routine checks whether a device or its PCI slot is in a state
 * that the interrupt should be handled.
 *
 * This function returns 0 if the device or the PCI slot is in a state that
 * interrupt should be handled, otherwise -EIO.
 */
static inline int
lpfc_intr_state_check(struct lpfc_hba *phba)
{
	/* If the pci channel is offline, ignore all the interrupts */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return -EIO;

	/* Update device level interrupt statistics */
	phba->sli.slistat.sli_intr++;

	/* Ignore all interrupts during initialization. */
	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
		return -EIO;

	return 0;
}

/**
 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-3 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there are slow-path events in
 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
 * interrupt mode, this function is called as part of the device-level
 * interrupt handler. When the PCI slot is in error recovery or the HBA
 * is undergoing initialization, the interrupt handler will not process
 * the interrupt. The link attention and ELS ring attention events are
 * handled by the worker thread. The interrupt handler signals the worker
 * thread and returns for these events. This function is called without
 * any lock held. It gets the hbalock to access and update SLI data
 * structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_sp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	uint32_t ha_copy, hc_copy;
	uint32_t work_ha_copy;
	unsigned long status;
	unsigned long iflag;
	uint32_t control;

	MAILBOX_t *mbox, *pmbox;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	int rc;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *)dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Stuff needs to be attended to when this function is invoked as an
	 * individual interrupt handler in MSI-X multi-message interrupt mode
	 */
	if (phba->intr_type == MSIX) {
		/* Check device state for handling interrupt */
		if (lpfc_intr_state_check(phba))
			return IRQ_NONE;
		/* Need to read HA REG for slow-path events */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			goto unplug_error;
		/* If somebody is waiting to handle an eratt don't process it
		 * here. The brdkill function will do this.
		 */
		if (phba->link_flag & LS_IGNORE_ERATT)
			ha_copy &= ~HA_ERATT;
		/* Check the need for handling ERATT in interrupt handler */
		if (ha_copy & HA_ERATT) {
			if (phba->hba_flag & HBA_ERATT_HANDLED)
				/* ERATT polling has handled ERATT */
				ha_copy &= ~HA_ERATT;
			else
				/* Indicate interrupt handler handles ERATT */
				phba->hba_flag |= HBA_ERATT_HANDLED;
		}

		/*
		 * If there is deferred error attention, do not check for any
		 * interrupt.
		 */
		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			return IRQ_NONE;
		}

		/* Clear up only attention source related to slow-path */
		if (lpfc_readl(phba->HCregaddr, &hc_copy))
			goto unplug_error;

		writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
			HC_LAINT_ENA | HC_ERINT_ENA),
			phba->HCregaddr);
		writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
			phba->HAregaddr);
		writel(hc_copy, phba->HCregaddr);
		readl(phba->HAregaddr); /* flush */
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	} else
		ha_copy = phba->ha_copy;

	work_ha_copy = ha_copy & phba->work_ha_mask;

	if (work_ha_copy) {
		if (work_ha_copy & HA_LATT) {
			if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
				/*
				 * Turn off Link Attention interrupts
				 * until CLEAR_LA done
				 */
				spin_lock_irqsave(&phba->hbalock, iflag);
				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
				if (lpfc_readl(phba->HCregaddr, &control))
					goto unplug_error;
				control &= ~HC_LAINT_ENA;
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
				spin_unlock_irqrestore(&phba->hbalock, iflag);
			}
			else
				work_ha_copy &= ~HA_LATT;
		}

		if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
			/*
			 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
			 * the only slow ring.
			 */
			status = (work_ha_copy &
				(HA_RXMASK << (4*LPFC_ELS_RING)));
			status >>= (4*LPFC_ELS_RING);
			if (status & HA_RXMASK) {
				spin_lock_irqsave(&phba->hbalock, iflag);
				if (lpfc_readl(phba->HCregaddr, &control))
					goto unplug_error;

				lpfc_debugfs_slow_ring_trc(phba,
				"ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
				control, status,
				(uint32_t)phba->sli.slistat.sli_intr);

				if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
					lpfc_debugfs_slow_ring_trc(phba,
						"ISR Disable ring:"
						"pwork:x%x hawork:x%x wait:x%x",
						phba->work_ha, work_ha_copy,
						(uint32_t)((unsigned long)
						&phba->work_waitq));

					control &=
					    ~(HC_R0INT_ENA << LPFC_ELS_RING);
					writel(control, phba->HCregaddr);
					readl(phba->HCregaddr); /* flush */
				}
				else {
					lpfc_debugfs_slow_ring_trc(phba,
						"ISR slow ring: pwork:"
						"x%x hawork:x%x wait:x%x",
						phba->work_ha, work_ha_copy,
						(uint32_t)((unsigned long)
						&phba->work_waitq));
				}
				spin_unlock_irqrestore(&phba->hbalock, iflag);
			}
		}
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (work_ha_copy & HA_ERATT) {
			if (lpfc_sli_read_hs(phba))
				goto unplug_error;
			/*
			 * Check if a deferred error condition
			 * is active
			 */
			if ((HS_FFER1 & phba->work_hs) &&
			    ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
			      HS_FFER6 | HS_FFER7 | HS_FFER8) &
			     phba->work_hs)) {
				phba->hba_flag |= DEFER_ERATT;
				/* Clear all interrupt enable conditions */
				writel(0, phba->HCregaddr);
				readl(phba->HCregaddr);
			}
		}

		if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
			pmb = phba->sli.mbox_active;
			pmbox = &pmb->u.mb;
			mbox = phba->mbox;
			vport = pmb->vport;

			/* First check out the status word */
			lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
			if (pmbox->mbxOwner != OWN_HOST) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				/*
				 * Stray Mailbox Interrupt, mbxCommand <cmd>
				 * mbxStatus <status>
				 */
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI,
						"(%d):0304 Stray Mailbox "
						"Interrupt mbxCommand x%x "
						"mbxStatus x%x\n",
						(vport ? vport->vpi : 0),
						pmbox->mbxCommand,
						pmbox->mbxStatus);
				/* clear mailbox attention bit */
				work_ha_copy &= ~HA_MBATT;
			} else {
				phba->sli.mbox_active = NULL;
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				phba->last_completion_time = jiffies;
				del_timer(&phba->sli.mbox_tmo);
				if (pmb->mbox_cmpl) {
					lpfc_sli_pcimem_bcopy(mbox, pmbox,
							MAILBOX_CMD_SIZE);
					if (pmb->out_ext_byte_len &&
						pmb->context2)
						lpfc_sli_pcimem_bcopy(
						phba->mbox_ext,
						pmb->context2,
						pmb->out_ext_byte_len);
				}
				if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
					pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;

					lpfc_debugfs_disc_trc(vport,
						LPFC_DISC_TRC_MBOX_VPORT,
						"MBOX dflt rpi: : "
						"status:x%x rpi:x%x",
						(uint32_t)pmbox->mbxStatus,
						pmbox->un.varWords[0], 0);

					if (!pmbox->mbxStatus) {
						mp = (struct lpfc_dmabuf *)
							(pmb->context1);
						ndlp = (struct lpfc_nodelist *)
							pmb->context2;

						/* Reg_LOGIN of dflt RPI was
						 * successful. Now let's get
						 * rid of the RPI using the
						 * same mbox buffer.
						 */
						lpfc_unreg_login(phba,
							vport->vpi,
							pmbox->un.varWords[0],
							pmb);
						pmb->mbox_cmpl =
							lpfc_mbx_cmpl_dflt_rpi;
						pmb->context1 = mp;
						pmb->context2 = ndlp;
						pmb->vport = vport;
						rc = lpfc_sli_issue_mbox(phba,
								pmb,
								MBX_NOWAIT);
						if (rc != MBX_BUSY)
							lpfc_printf_log(phba,
							KERN_ERR,
							LOG_MBOX | LOG_SLI,
							"0350 rc should have "
							"been MBX_BUSY\n");
						if (rc != MBX_NOT_FINISHED)
							goto send_current_mbox;
					}
				}
				spin_lock_irqsave(
						&phba->pport->work_port_lock,
						iflag);
				phba->pport->work_port_events &=
					~WORKER_MBOX_TMO;
				spin_unlock_irqrestore(
						&phba->pport->work_port_lock,
						iflag);
				lpfc_mbox_cmpl_put(phba, pmb);
			}
		} else
			spin_unlock_irqrestore(&phba->hbalock, iflag);

		if ((work_ha_copy & HA_MBATT) &&
		    (phba->sli.mbox_active == NULL)) {
send_current_mbox:
			/* Process next mailbox command if there is one */
			do {
				rc = lpfc_sli_issue_mbox(phba, NULL,
							 MBX_NOWAIT);
			} while (rc == MBX_NOT_FINISHED);
			if (rc != MBX_SUCCESS)
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI, "0349 rc should be "
						"MBX_SUCCESS\n");
		}

		spin_lock_irqsave(&phba->hbalock, iflag);
		phba->work_ha |= work_ha_copy;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		lpfc_worker_wake_up(phba);
	}
	return IRQ_HANDLED;
unplug_error:
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return IRQ_HANDLED;

} /* lpfc_sli_sp_intr_handler */

/**
 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-3 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
 * ring event in the HBA. However, when the device is enabled with either
 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
 * device-level interrupt handler. When the PCI slot is in error recovery
 * or the HBA is undergoing initialization, the interrupt handler will not
 * process the interrupt. The SCSI FCP fast-path ring events are handled in
 * the interrupt context. This function is called without any lock held.
 * It gets the hbalock to access and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_fp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	uint32_t ha_copy;
	unsigned long status;
	unsigned long iflag;

	/* Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Stuff needs to be attended to when this function is invoked as an
	 * individual interrupt handler in MSI-X multi-message interrupt mode
	 */
	if (phba->intr_type == MSIX) {
		/* Check device state for handling interrupt */
		if (lpfc_intr_state_check(phba))
			return IRQ_NONE;
		/* Need to read HA REG for FCP ring and other ring events */
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			return IRQ_HANDLED;
		/* Clear up only attention source related to fast-path */
		spin_lock_irqsave(&phba->hbalock, iflag);
		/*
		 * If there is deferred error attention, do not check for
		 * any interrupt.
		 */
		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			return IRQ_NONE;
		}
		writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
			phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	} else
		ha_copy = phba->ha_copy;

	/*
	 * Process all events on FCP ring. Take the optimized path for FCP IO.
	 */
	ha_copy &= ~(phba->work_ha_mask);

	status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status >>= (4*LPFC_FCP_RING);
	if (status & HA_RXMASK)
		lpfc_sli_handle_fast_ring_event(phba,
						&phba->sli.ring[LPFC_FCP_RING],
						status);

	if (phba->cfg_multi_ring_support == 2) {
		/*
		 * Process all events on extra ring. Take the optimized path
		 * for extra ring IO.
		 */
		status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status >>= (4*LPFC_EXTRA_RING);
		if (status & HA_RXMASK) {
			lpfc_sli_handle_fast_ring_event(phba,
					&phba->sli.ring[LPFC_EXTRA_RING],
					status);
		}
	}
	return IRQ_HANDLED;
} /* lpfc_sli_fp_intr_handler */

/**
 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is the HBA device-level interrupt handler to device with
 * SLI-3 interface spec, called from the PCI layer when either MSI or
 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
 * requires driver attention. This function invokes the slow-path interrupt
 * attention handling function and fast-path interrupt attention handling
 * function in turn to process the relevant HBA attention events. This
 * function is called without any lock held. It gets the hbalock to access
 * and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	irqreturn_t sp_irq_rc, fp_irq_rc;
	unsigned long status1, status2;
	uint32_t hc_copy;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/* Check device state for handling interrupt */
	if (lpfc_intr_state_check(phba))
		return IRQ_NONE;

	spin_lock(&phba->hbalock);
	if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_HANDLED;
	}

	if (unlikely(!phba->ha_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_NONE;
	} else if (phba->ha_copy & HA_ERATT) {
		if (phba->hba_flag & HBA_ERATT_HANDLED)
			/* ERATT polling has handled ERATT */
			phba->ha_copy &= ~HA_ERATT;
		else
			/* Indicate interrupt handler handles ERATT */
			phba->hba_flag |= HBA_ERATT_HANDLED;
	}

	/*
	 * If there is deferred error attention, do not check for any interrupt.
	 */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock(&phba->hbalock);
		return IRQ_NONE;
	}

	/* Clear attention sources except link and error attentions */
	if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_HANDLED;
	}
	writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
		| HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
		phba->HCregaddr);
	writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
	writel(hc_copy, phba->HCregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock(&phba->hbalock);

	/*
	 * Invoke slow-path host attention interrupt handling as appropriate.
	 */

	/* status of events with mailbox and link attention */
	status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);

	/* status of events with ELS ring */
	status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status2 >>= (4*LPFC_ELS_RING);

	if (status1 || (status2 & HA_RXMASK))
		sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
	else
		sp_irq_rc = IRQ_NONE;

	/*
	 * Invoke fast-path host attention interrupt handling as appropriate.
	 */

	/* status of events with FCP ring */
	status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status1 >>= (4*LPFC_FCP_RING);

	/* status of events with extra ring */
	if (phba->cfg_multi_ring_support == 2) {
		status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status2 >>= (4*LPFC_EXTRA_RING);
	} else
		status2 = 0;

	if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
		fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
	else
		fp_irq_rc = IRQ_NONE;

	/* Return device-level interrupt handling status */
	return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
} /* lpfc_sli_intr_handler */

/**
 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 FCP abort XRI events.
 **/
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the fcp xri abort event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the fcp xri abort events */
	while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Notify aborted XRI for FCP work queue */
		lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
		/* Free the event processed back to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}

/**
 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 els abort xri events.
 **/
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the els xri abort event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the els xri abort events */
	while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Notify aborted XRI for ELS work queue */
		lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
		/* Free the event processed back to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}
10974
10975/**
10976 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
10977 * @phba: pointer to lpfc hba data structure
10978 * @pIocbIn: pointer to the rspiocbq
10979 * @pIocbOut: pointer to the cmdiocbq
10980 * @wcqe: pointer to the complete wcqe
10981 *
10982 * This routine transfers the fields of a command iocbq to a response iocbq
10983 * by copying all the IOCB fields from command iocbq and transferring the
10984 * completion status information from the complete wcqe.
10985 **/
10986static void
10987lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
10988 struct lpfc_iocbq *pIocbIn,
10989 struct lpfc_iocbq *pIocbOut,
10990 struct lpfc_wcqe_complete *wcqe)
10991{
10992 unsigned long iflags;
10993 uint32_t status;
10994 size_t offset = offsetof(struct lpfc_iocbq, iocb);
10995
10996 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
10997 sizeof(struct lpfc_iocbq) - offset);
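 /*
  * Copying from offsetof(iocb) onward clones the command side's IOCB
  * payload and completion fields while leaving the response iocbq's own
  * list head, iotag and context fields untouched.
  */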
10998 /* Map WCQE parameters into irspiocb parameters */
10999 status = bf_get(lpfc_wcqe_c_status, wcqe);
11000 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
11001 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
11002 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
11003 pIocbIn->iocb.un.fcpi.fcpi_parm =
11004 pIocbOut->iocb.un.fcpi.fcpi_parm -
11005 wcqe->total_data_placed;
11006 else
11007 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
11008 else {
11009 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
11010 pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed;
11011 }
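 /*
  * For an FCP underrun (IOSTAT_FCP_RSP_ERROR), fcpi_parm above ends up
  * holding the residual: bytes requested minus bytes actually placed.
  */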
11012
11013 /* Convert BG errors for completion status */
11014 if (status == CQE_STATUS_DI_ERROR) {
11015 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
11016
11017 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
11018 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
11019 else
11020 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
11021
11022 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
11023 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
11024 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
11025 BGS_GUARD_ERR_MASK;
11026 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
11027 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
11028 BGS_APPTAG_ERR_MASK;
11029 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
11030 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
11031 BGS_REFTAG_ERR_MASK;
11032
11033 /* Check to see if there was any good data before the error */
11034 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
11035 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
11036 BGS_HI_WATER_MARK_PRESENT_MASK;
11037 pIocbIn->iocb.unsli3.sli3_bg.bghm =
11038 wcqe->total_data_placed;
11039 }
11040
11041 /*
11042 * Set ALL the error bits to indicate we don't know what
11043 * type of error it is.
11044 */
11045 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
11046 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
11047 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
11048 BGS_GUARD_ERR_MASK);
11049 }
11050
11051 /* Pick up HBA exchange busy condition */
11052 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
11053 spin_lock_irqsave(&phba->hbalock, iflags);
11054 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
11055 spin_unlock_irqrestore(&phba->hbalock, iflags);
11056 }
11057}
11058
11059/**
11060 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
11061 * @phba: Pointer to HBA context object.
11062 * @irspiocbq: Pointer to the driver response iocbq carrying the ELS WCQE.
11063 *
11064 * This routine handles an ELS work-queue completion event and constructs
11065 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
11066 * discovery engine to handle.
11067 *
11068 * Return: Pointer to the receive IOCBQ, NULL otherwise.
11069 **/
11070static struct lpfc_iocbq *
11071lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
11072 struct lpfc_iocbq *irspiocbq)
11073{
11074 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
11075 struct lpfc_iocbq *cmdiocbq;
11076 struct lpfc_wcqe_complete *wcqe;
11077 unsigned long iflags;
11078
11079 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
11080 spin_lock_irqsave(&pring->ring_lock, iflags);
11081 pring->stats.iocb_event++;
11082 /* Look up the ELS command IOCB and create pseudo response IOCB */
11083 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
11084 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11085 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11086
11087 if (unlikely(!cmdiocbq)) {
11088 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11089 "0386 ELS complete with no corresponding "
11090 "cmdiocb: iotag (%d)\n",
11091 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11092 lpfc_sli_release_iocbq(phba, irspiocbq);
11093 return NULL;
11094 }
11095
11096 /* Fake the irspiocbq and copy necessary response information */
11097 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
11098
11099 return irspiocbq;
11100}
11101
11102/**
11103 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
11104 * @phba: Pointer to HBA context object.
11105 * @mcqe: Pointer to mailbox completion queue entry.
11106 *
11107 * This routine processes a mailbox completion queue entry carrying an
11108 * asynchronous event.
11109 *
11110 * Return: true if work posted to worker thread, otherwise false.
11111 **/
11112static bool
11113lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
11114{
11115 struct lpfc_cq_event *cq_event;
11116 unsigned long iflags;
11117
11118 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11119 "0392 Async Event: word0:x%x, word1:x%x, "
11120 "word2:x%x, word3:x%x\n", mcqe->word0,
11121 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
11122
11123 /* Allocate a new internal CQ_EVENT entry */
11124 cq_event = lpfc_sli4_cq_event_alloc(phba);
11125 if (!cq_event) {
11126 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11127 "0394 Failed to allocate CQ_EVENT entry\n");
11128 return false;
11129 }
11130
11131 /* Move the CQE into an asynchronous event entry */
11132 memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
11133 spin_lock_irqsave(&phba->hbalock, iflags);
11134 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
11135 /* Set the async event flag */
11136 phba->hba_flag |= ASYNC_EVENT;
11137 spin_unlock_irqrestore(&phba->hbalock, iflags);
11138
11139 return true;
11140}
11141
11142/**
11143 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
11144 * @phba: Pointer to HBA context object.
11145 * @mcqe: Pointer to mailbox completion queue entry.
11146 *
11147 * This routine processes a mailbox completion queue entry carrying a
11148 * mailbox completion event.
11149 *
11150 * Return: true if work posted to worker thread, otherwise false.
11151 **/
11152static bool
11153lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
11154{
11155 uint32_t mcqe_status;
11156 MAILBOX_t *mbox, *pmbox;
11157 struct lpfc_mqe *mqe;
11158 struct lpfc_vport *vport;
11159 struct lpfc_nodelist *ndlp;
11160 struct lpfc_dmabuf *mp;
11161 unsigned long iflags;
11162 LPFC_MBOXQ_t *pmb;
11163 bool workposted = false;
11164 int rc;
11165
11166 /* If not a mailbox complete MCQE, bail out after checking mailbox consume */
11167 if (!bf_get(lpfc_trailer_completed, mcqe))
11168 goto out_no_mqe_complete;
11169
11170 /* Get the reference to the active mbox command */
11171 spin_lock_irqsave(&phba->hbalock, iflags);
11172 pmb = phba->sli.mbox_active;
11173 if (unlikely(!pmb)) {
11174 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
11175 "1832 No pending MBOX command to handle\n");
11176 spin_unlock_irqrestore(&phba->hbalock, iflags);
11177 goto out_no_mqe_complete;
11178 }
11179 spin_unlock_irqrestore(&phba->hbalock, iflags);
11180 mqe = &pmb->u.mqe;
11181 pmbox = (MAILBOX_t *)&pmb->u.mqe;
11182 mbox = phba->mbox;
11183 vport = pmb->vport;
11184
11185 /* Reset heartbeat timer */
11186 phba->last_completion_time = jiffies;
11187 del_timer(&phba->sli.mbox_tmo);
11188
11189 /* Move mbox data to caller's mailbox region, do endian swapping */
11190 if (pmb->mbox_cmpl && mbox)
11191 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
11192
11193 /*
11194 * For mcqe errors, conditionally move a modified error code to
11195 * the mbox so that the error will not be missed.
11196 */
11197 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
11198 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
11199 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
11200 bf_set(lpfc_mqe_status, mqe,
11201 (LPFC_MBX_ERROR_RANGE | mcqe_status));
11202 }
11203 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
11204 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
11205 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
11206 "MBOX dflt rpi: status:x%x rpi:x%x",
11207 mcqe_status,
11208 pmbox->un.varWords[0], 0);
11209 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
11210 mp = (struct lpfc_dmabuf *)(pmb->context1);
11211 ndlp = (struct lpfc_nodelist *)pmb->context2;
11212 /* Reg_LOGIN of dflt RPI was successful. Now let's get
11213 * rid of the RPI using the same mbox buffer.
11214 */
11215 lpfc_unreg_login(phba, vport->vpi,
11216 pmbox->un.varWords[0], pmb);
11217 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
11218 pmb->context1 = mp;
11219 pmb->context2 = ndlp;
11220 pmb->vport = vport;
11221 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
11222 if (rc != MBX_BUSY)
11223 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
11224 LOG_SLI, "0385 rc should "
11225 "have been MBX_BUSY\n");
11226 if (rc != MBX_NOT_FINISHED)
11227 goto send_current_mbox;
11228 }
11229 }
11230 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
11231 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
11232 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
11233
11234 /* There is mailbox completion work to do */
11235 spin_lock_irqsave(&phba->hbalock, iflags);
11236 __lpfc_mbox_cmpl_put(phba, pmb);
11237 phba->work_ha |= HA_MBATT;
11238 spin_unlock_irqrestore(&phba->hbalock, iflags);
11239 workposted = true;
11240
11241send_current_mbox:
11242 spin_lock_irqsave(&phba->hbalock, iflags);
11243 /* Release the mailbox command posting token */
11244 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11245 /* Setting active mailbox pointer need to be in sync to flag clear */
11246 phba->sli.mbox_active = NULL;
11247 spin_unlock_irqrestore(&phba->hbalock, iflags);
11248 /* Wake up worker thread to post the next pending mailbox command */
11249 lpfc_worker_wake_up(phba);
11250out_no_mqe_complete:
11251 if (bf_get(lpfc_trailer_consumed, mcqe))
11252 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
11253 return workposted;
11254}
11255
11256/**
11257 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
11258 * @phba: Pointer to HBA context object.
11259 * @cqe: Pointer to mailbox completion queue entry.
11260 *
11261 * This routine processes a mailbox completion queue entry; it invokes the
11262 * proper mailbox completion or asynchronous event handling routine
11263 * according to the MCQE's async bit.
11264 *
11265 * Return: true if work posted to worker thread, otherwise false.
11266 **/
11267static bool
11268lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
11269{
11270 struct lpfc_mcqe mcqe;
11271 bool workposted;
11272
11273 /* Copy the mailbox MCQE and convert endian order as needed */
11274 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
11275
11276 /* Invoke the proper event handling routine */
11277 if (!bf_get(lpfc_trailer_async, &mcqe))
11278 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
11279 else
11280 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
11281 return workposted;
11282}
11283
11284/**
11285 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
11286 * @phba: Pointer to HBA context object.
11287 * @cq: Pointer to associated CQ
11288 * @wcqe: Pointer to work-queue completion queue entry.
11289 *
11290 * This routine handles an ELS work-queue completion event.
11291 *
11292 * Return: true if work posted to worker thread, otherwise false.
11293 **/
11294static bool
11295lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11296 struct lpfc_wcqe_complete *wcqe)
11297{
11298 struct lpfc_iocbq *irspiocbq;
11299 unsigned long iflags;
11300 struct lpfc_sli_ring *pring = cq->pring;
11301
11302 /* Get an irspiocbq for later ELS response processing use */
11303 irspiocbq = lpfc_sli_get_iocbq(phba);
11304 if (!irspiocbq) {
11305 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11306 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
11307 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
11308 pring->txq_cnt, phba->iocb_cnt,
11309 phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt,
11310 phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt);
11311 return false;
11312 }
11313
11314 /* Save off the slow-path queue event for worker thread to process */
11315 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
11316 spin_lock_irqsave(&phba->hbalock, iflags);
11317 list_add_tail(&irspiocbq->cq_event.list,
11318 &phba->sli4_hba.sp_queue_event);
11319 phba->hba_flag |= HBA_SP_QUEUE_EVT;
11320 spin_unlock_irqrestore(&phba->hbalock, iflags);
11321
11322 return true;
11323}
11324
11325/**
11326 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
11327 * @phba: Pointer to HBA context object.
11328 * @wcqe: Pointer to work-queue completion queue entry.
11329 *
11330 * This routine handles a slow-path WQ entry consumed event by invoking the
11331 * proper WQ release routine to the slow-path WQ.
11332 **/
11333static void
11334lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
11335 struct lpfc_wcqe_release *wcqe)
11336{
11337 /* sanity check on queue memory */
11338 if (unlikely(!phba->sli4_hba.els_wq))
11339 return;
11340 /* Check for the slow-path ELS work queue */
11341 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
11342 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
11343 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
11344 else
11345 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11346 "2579 Slow-path wqe consume event carries "
11347 "mismatched qid: wcqe-qid=x%x, sp-qid=x%x\n",
11348 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
11349 phba->sli4_hba.els_wq->queue_id);
11350}
11351
11352/**
11353 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
11354 * @phba: Pointer to HBA context object.
11355 * @cq: Pointer to a WQ completion queue.
11356 * @wcqe: Pointer to work-queue completion queue entry.
11357 *
11358 * This routine handles an XRI abort event.
11359 *
11360 * Return: true if work posted to worker thread, otherwise false.
11361 **/
11362static bool
11363lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
11364 struct lpfc_queue *cq,
11365 struct sli4_wcqe_xri_aborted *wcqe)
11366{
11367 bool workposted = false;
11368 struct lpfc_cq_event *cq_event;
11369 unsigned long iflags;
11370
11371 /* Allocate a new internal CQ_EVENT entry */
11372 cq_event = lpfc_sli4_cq_event_alloc(phba);
11373 if (!cq_event) {
11374 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11375 "0602 Failed to allocate CQ_EVENT entry\n");
11376 return false;
11377 }
11378
11379 /* Move the CQE into the proper xri abort event list */
11380 memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
11381 switch (cq->subtype) {
11382 case LPFC_FCP:
11383 spin_lock_irqsave(&phba->hbalock, iflags);
11384 list_add_tail(&cq_event->list,
11385 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
11386 /* Set the fcp xri abort event flag */
11387 phba->hba_flag |= FCP_XRI_ABORT_EVENT;
11388 spin_unlock_irqrestore(&phba->hbalock, iflags);
11389 workposted = true;
11390 break;
11391 case LPFC_ELS:
11392 spin_lock_irqsave(&phba->hbalock, iflags);
11393 list_add_tail(&cq_event->list,
11394 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
11395 /* Set the els xri abort event flag */
11396 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
11397 spin_unlock_irqrestore(&phba->hbalock, iflags);
11398 workposted = true;
11399 break;
11400 default:
11401 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11402 "0603 Invalid work queue CQE subtype (x%x)\n",
11403 cq->subtype);
11404 workposted = false;
11405 break;
11406 }
11407 return workposted;
11408}
11409
11410/**
11411 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
11412 * @phba: Pointer to HBA context object.
11413 * @rcqe: Pointer to receive-queue completion queue entry.
11414 *
11415 * This routine processes a receive-queue completion queue entry.
11416 *
11417 * Return: true if work posted to worker thread, otherwise false.
11418 **/
11419static bool
11420lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
11421{
11422 bool workposted = false;
11423 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
11424 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
11425 struct hbq_dmabuf *dma_buf;
11426 uint32_t status, rq_id;
11427 unsigned long iflags;
11428
11429 /* sanity check on queue memory */
11430 if (unlikely(!hrq) || unlikely(!drq))
11431 return workposted;
11432
11433 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
11434 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
11435 else
11436 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
11437 if (rq_id != hrq->queue_id)
11438 goto out;
11439
11440 status = bf_get(lpfc_rcqe_status, rcqe);
11441 switch (status) {
11442 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
11443 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11444 "2537 Receive Frame Truncated!!\n");
11445 hrq->RQ_buf_trunc++;
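 /* Fall through: a truncated frame is still received and queued below */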
11446 case FC_STATUS_RQ_SUCCESS:
11447 lpfc_sli4_rq_release(hrq, drq);
11448 spin_lock_irqsave(&phba->hbalock, iflags);
11449 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
11450 if (!dma_buf) {
11451 hrq->RQ_no_buf_found++;
11452 spin_unlock_irqrestore(&phba->hbalock, iflags);
11453 goto out;
11454 }
11455 hrq->RQ_rcv_buf++;
11456 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
11457 /* save off the frame for the worker thread to process */
11458 list_add_tail(&dma_buf->cq_event.list,
11459 &phba->sli4_hba.sp_queue_event);
11460 /* Frame received */
11461 phba->hba_flag |= HBA_SP_QUEUE_EVT;
11462 spin_unlock_irqrestore(&phba->hbalock, iflags);
11463 workposted = true;
11464 break;
11465 case FC_STATUS_INSUFF_BUF_NEED_BUF:
11466 case FC_STATUS_INSUFF_BUF_FRM_DISC:
11467 hrq->RQ_no_posted_buf++;
11468 /* Post more buffers if possible */
11469 spin_lock_irqsave(&phba->hbalock, iflags);
11470 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
11471 spin_unlock_irqrestore(&phba->hbalock, iflags);
11472 workposted = true;
11473 break;
11474 }
11475out:
11476 return workposted;
11477}
11478
11479/**
11480 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
11481 * @phba: Pointer to HBA context object.
11482 * @cq: Pointer to the completion queue.
11483 * @wcqe: Pointer to a completion queue entry.
11484 *
11485 * This routine processes a slow-path work-queue or receive-queue completion
11486 * queue entry.
11487 *
11488 * Return: true if work posted to worker thread, otherwise false.
11489 **/
11490static bool
11491lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11492 struct lpfc_cqe *cqe)
11493{
11494 struct lpfc_cqe cqevt;
11495 bool workposted = false;
11496
11497 /* Copy the work queue CQE and convert endian order if needed */
11498 lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
11499
11500 /* Check and process for different type of WCQE and dispatch */
11501 switch (bf_get(lpfc_cqe_code, &cqevt)) {
11502 case CQE_CODE_COMPL_WQE:
11503 /* Process the WQ/RQ complete event */
11504 phba->last_completion_time = jiffies;
11505 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
11506 (struct lpfc_wcqe_complete *)&cqevt);
11507 break;
11508 case CQE_CODE_RELEASE_WQE:
11509 /* Process the WQ release event */
11510 lpfc_sli4_sp_handle_rel_wcqe(phba,
11511 (struct lpfc_wcqe_release *)&cqevt);
11512 break;
11513 case CQE_CODE_XRI_ABORTED:
11514 /* Process the WQ XRI abort event */
11515 phba->last_completion_time = jiffies;
11516 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
11517 (struct sli4_wcqe_xri_aborted *)&cqevt);
11518 break;
11519 case CQE_CODE_RECEIVE:
11520 case CQE_CODE_RECEIVE_V1:
11521 /* Process the RQ event */
11522 phba->last_completion_time = jiffies;
11523 workposted = lpfc_sli4_sp_handle_rcqe(phba,
11524 (struct lpfc_rcqe *)&cqevt);
11525 break;
11526 default:
11527 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11528 "0388 Not a valid WCQE code: x%x\n",
11529 bf_get(lpfc_cqe_code, &cqevt));
11530 break;
11531 }
11532 return workposted;
11533}
11534
11535/**
11536 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
11537 * @phba: Pointer to HBA context object.
11538 * @eqe: Pointer to slow-path event queue entry.
11539 *
11540 * This routine processes an event queue entry from the slow-path event queue.
11541 * It will check the MajorCode and MinorCode to determine whether this is for a
11542 * completion event on a completion queue; if not, an error shall be logged
11543 * and it will just return. Otherwise, it will get to the corresponding completion
11544 * queue and process all the entries on that completion queue, rearm the
11545 * completion queue, and then return.
11546 *
11547 **/
11548static void
11549lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
11550 struct lpfc_queue *speq)
11551{
11552 struct lpfc_queue *cq = NULL, *childq;
11553 struct lpfc_cqe *cqe;
11554 bool workposted = false;
11555 int ecount = 0;
11556 uint16_t cqid;
11557
11558 /* Get the reference to the corresponding CQ */
11559 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
11560
11561 list_for_each_entry(childq, &speq->child_list, list) {
11562 if (childq->queue_id == cqid) {
11563 cq = childq;
11564 break;
11565 }
11566 }
11567 if (unlikely(!cq)) {
11568 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
11569 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11570 "0365 Slow-path CQ identifier "
11571 "(%d) does not exist\n", cqid);
11572 return;
11573 }
11574
11575 /* Process all the entries to the CQ */
11576 switch (cq->type) {
11577 case LPFC_MCQ:
11578 while ((cqe = lpfc_sli4_cq_get(cq))) {
11579 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
11580 if (!(++ecount % cq->entry_repost))
11581 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
11582 cq->CQ_mbox++;
11583 }
11584 break;
11585 case LPFC_WCQ:
11586 while ((cqe = lpfc_sli4_cq_get(cq))) {
11587 if (cq->subtype == LPFC_FCP)
11588 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq,
11589 cqe);
11590 else
11591 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
11592 cqe);
11593 if (!(++ecount % cq->entry_repost))
11594 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
11595 }
11596
11597 /* Track the max number of CQEs processed in 1 EQ */
11598 if (ecount > cq->CQ_max_cqe)
11599 cq->CQ_max_cqe = ecount;
11600 break;
11601 default:
11602 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11603 "0370 Invalid completion queue type (%d)\n",
11604 cq->type);
11605 return;
11606 }
11607
11608 /* Catch the no cq entry condition, log an error */
11609 if (unlikely(ecount == 0))
11610 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11611 "0371 No entry from the CQ: identifier "
11612 "(x%x), type (%d)\n", cq->queue_id, cq->type);
11613
11614 /* In any case, flush and re-arm the CQ */
11615 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
11616
11617 /* wake up worker thread if there are works to be done */
11618 if (workposted)
11619 lpfc_worker_wake_up(phba);
11620}
11621
11622/**
11623 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
11624 * @phba: Pointer to HBA context object.
11625 * @cq: Pointer to associated CQ
11626 * @wcqe: Pointer to work-queue completion queue entry.
11627 *
11628 * This routine processes a fast-path work queue completion entry from a fast-path
11629 * event queue for FCP command response completion.
11630 **/
11631static void
11632lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11633 struct lpfc_wcqe_complete *wcqe)
11634{
11635 struct lpfc_sli_ring *pring = cq->pring;
11636 struct lpfc_iocbq *cmdiocbq;
11637 struct lpfc_iocbq irspiocbq;
11638 unsigned long iflags;
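 /*
  * Note: irspiocbq lives on this function's stack; the fast path avoids
  * allocating an iocbq and hands the faked response straight to the
  * completion callback at the bottom of this routine.
  */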
11639
11640 /* Check for response status */
11641 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
11642 /* If resource errors reported from HBA, reduce queue
11643 * depth of the SCSI device.
11644 */
11645 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
11646 IOSTAT_LOCAL_REJECT)) &&
11647 ((wcqe->parameter & IOERR_PARAM_MASK) ==
11648 IOERR_NO_RESOURCES))
11649 phba->lpfc_rampdown_queue_depth(phba);
11650
11651 /* Log the error status */
11652 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11653 "0373 FCP complete error: status=x%x, "
11654 "hw_status=x%x, total_data_specified=%d, "
11655 "parameter=x%x, word3=x%x\n",
11656 bf_get(lpfc_wcqe_c_status, wcqe),
11657 bf_get(lpfc_wcqe_c_hw_status, wcqe),
11658 wcqe->total_data_placed, wcqe->parameter,
11659 wcqe->word3);
11660 }
11661
11662 /* Look up the FCP command IOCB and create pseudo response IOCB */
11663 spin_lock_irqsave(&pring->ring_lock, iflags);
11664 pring->stats.iocb_event++;
11665 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
11666 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11667 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11668 if (unlikely(!cmdiocbq)) {
11669 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11670 "0374 FCP complete with no corresponding "
11671 "cmdiocb: iotag (%d)\n",
11672 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11673 return;
11674 }
11675 if (unlikely(!cmdiocbq->iocb_cmpl)) {
11676 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11677 "0375 FCP cmdiocb not callback function "
11678 "iotag: (%d)\n",
11679 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11680 return;
11681 }
11682
11683 /* Fake the irspiocb and copy necessary response information */
11684 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
11685
11686 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
11687 spin_lock_irqsave(&phba->hbalock, iflags);
11688 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
11689 spin_unlock_irqrestore(&phba->hbalock, iflags);
11690 }
11691
11692 /* Pass the cmd_iocb and the rsp state to the upper layer */
11693 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
11694}
11695
11696/**
11697 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
11698 * @phba: Pointer to HBA context object.
11699 * @cq: Pointer to completion queue.
11700 * @wcqe: Pointer to work-queue completion queue entry.
11701 *
11702 * This routine handles a fast-path WQ entry consumed event by invoking the
11703 * proper WQ release routine on the matching fast-path WQ.
11704 **/
11705static void
11706lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11707 struct lpfc_wcqe_release *wcqe)
11708{
11709 struct lpfc_queue *childwq;
11710 bool wqid_matched = false;
11711 uint16_t fcp_wqid;
11712
11713 /* Check for fast-path FCP work queue release */
11714 fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
11715 list_for_each_entry(childwq, &cq->child_list, list) {
11716 if (childwq->queue_id == fcp_wqid) {
11717 lpfc_sli4_wq_release(childwq,
11718 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
11719 wqid_matched = true;
11720 break;
11721 }
11722 }
11723 /* Report warning log message if no match found */
11724 if (wqid_matched != true)
11725 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11726 "2580 Fast-path wqe consume event carries "
11727 "mismatched qid: wcqe-qid=x%x\n", fcp_wqid);
11728}
11729
11730/**
11731 * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry
11732 * @cq: Pointer to the completion queue.
11733 * @cqe: Pointer to fast-path completion queue entry.
11734 *
11735 * This routine processes a fast-path work queue completion entry from a fast-path
11736 * event queue for FCP command response completion.
11737 **/
11738static int
11739lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11740 struct lpfc_cqe *cqe)
11741{
11742 struct lpfc_wcqe_release wcqe;
11743 bool workposted = false;
11744
11745 /* Copy the work queue CQE and convert endian order if needed */
11746 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
11747
11748 /* Check and process for different type of WCQE and dispatch */
11749 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
11750 case CQE_CODE_COMPL_WQE:
11751 cq->CQ_wq++;
11752 /* Process the WQ complete event */
11753 phba->last_completion_time = jiffies;
11754 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
11755 (struct lpfc_wcqe_complete *)&wcqe);
11756 break;
11757 case CQE_CODE_RELEASE_WQE:
11758 cq->CQ_release_wqe++;
11759 /* Process the WQ release event */
11760 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
11761 (struct lpfc_wcqe_release *)&wcqe);
11762 break;
11763 case CQE_CODE_XRI_ABORTED:
11764 cq->CQ_xri_aborted++;
11765 /* Process the WQ XRI abort event */
11766 phba->last_completion_time = jiffies;
11767 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
11768 (struct sli4_wcqe_xri_aborted *)&wcqe);
11769 break;
11770 default:
11771 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11772 "0144 Not a valid WCQE code: x%x\n",
11773 bf_get(lpfc_wcqe_c_code, &wcqe));
11774 break;
11775 }
11776 return workposted;
11777}
11778
11779/**
11780 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
11781 * @phba: Pointer to HBA context object.
11782 * @eqe: Pointer to fast-path event queue entry.
11783 *
11784 * This routine processes an event queue entry from the fast-path event queue.
11785 * It will check the MajorCode and MinorCode to determine whether this is for a
11786 * completion event on a completion queue; if not, an error shall be logged
11787 * and it will just return. Otherwise, it will get to the corresponding completion
11788 * queue and process all the entries on the completion queue, rearm the
11789 * completion queue, and then return.
11790 **/
11791static void
11792lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
11793 uint32_t qidx)
11794{
11795 struct lpfc_queue *cq;
11796 struct lpfc_cqe *cqe;
11797 bool workposted = false;
11798 uint16_t cqid;
11799 int ecount = 0;
11800
11801 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
11802 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11803 "0366 Not a valid completion "
11804 "event: majorcode=x%x, minorcode=x%x\n",
11805 bf_get_le32(lpfc_eqe_major_code, eqe),
11806 bf_get_le32(lpfc_eqe_minor_code, eqe));
11807 return;
11808 }
11809
11810 /* Get the reference to the corresponding CQ */
11811 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
11812
11813 /* Check if this is a Slow path event */
11814 if (unlikely(cqid != phba->sli4_hba.fcp_cq_map[qidx])) {
11815 lpfc_sli4_sp_handle_eqe(phba, eqe,
11816 phba->sli4_hba.hba_eq[qidx]);
11817 return;
11818 }
11819
11820 if (unlikely(!phba->sli4_hba.fcp_cq)) {
11821 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11822 "3146 Fast-path completion queues "
11823 "do not exist\n");
11824 return;
11825 }
11826 cq = phba->sli4_hba.fcp_cq[qidx];
11827 if (unlikely(!cq)) {
11828 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
11829 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11830 "0367 Fast-path completion queue "
11831 "(%d) does not exist\n", qidx);
11832 return;
11833 }
11834
11835 if (unlikely(cqid != cq->queue_id)) {
11836 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11837 "0368 Mismatched fast-path completion "
11838 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
11839 cqid, cq->queue_id);
11840 return;
11841 }
11842
11843 /* Process all the entries to the CQ */
11844 while ((cqe = lpfc_sli4_cq_get(cq))) {
11845 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
11846 if (!(++ecount % cq->entry_repost))
11847 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
11848 }
11849
11850 /* Track the max number of CQEs processed in 1 EQ */
11851 if (ecount > cq->CQ_max_cqe)
11852 cq->CQ_max_cqe = ecount;
11853
11854 /* Catch the no cq entry condition */
11855 if (unlikely(ecount == 0))
11856 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11857 "0369 No entry from fast-path completion "
11858 "queue fcpcqid=%d\n", cq->queue_id);
11859
11860 /* In any case, flush and re-arm the CQ */
11861 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
11862
11863 /* wake up worker thread if there are works to be done */
11864 if (workposted)
11865 lpfc_worker_wake_up(phba);
11866}
11867
11868static void
11869lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
11870{
11871 struct lpfc_eqe *eqe;
11872
11873 /* walk all the EQ entries and drop on the floor */
11874 while ((eqe = lpfc_sli4_eq_get(eq)))
11875 ;
11876
11877 /* Clear and re-arm the EQ */
11878 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
11879}
11880
11881/**
11882 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
11883 * @irq: Interrupt number.
11884 * @dev_id: The device context pointer.
11885 *
11886 * This function is directly called from the PCI layer as an interrupt
11887 * service routine when device with SLI-4 interface spec is enabled with
11888 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
11889 * ring event in the HBA. However, when the device is enabled with either
11890 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
11891 * device-level interrupt handler. When the PCI slot is in error recovery
11892 * or the HBA is undergoing initialization, the interrupt handler will not
11893 * process the interrupt. The SCSI FCP fast-path ring events are handled in
11894 * the interrupt context. This function is called without any lock held.
11895 * It gets the hbalock to access and update SLI data structures. Note that
11896 * the FCP EQs and FCP CQs are mapped one-to-one, so the FCP EQ index is
11897 * equal to the FCP CQ index.
11898 *
11899 * The link attention and ELS ring attention events are handled
11900 * by the worker thread. The interrupt handler signals the worker thread
11901 * and returns for these events. This function is called without any lock
11902 * held. It gets the hbalock to access and update SLI data structures.
11903 *
11904 * This function returns IRQ_HANDLED when interrupt is handled else it
11905 * returns IRQ_NONE.
11906 **/
11907irqreturn_t
11908lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
11909{
11910 struct lpfc_hba *phba;
11911 struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
11912 struct lpfc_queue *fpeq;
11913 struct lpfc_eqe *eqe;
11914 unsigned long iflag;
11915 int ecount = 0;
11916 int fcp_eqidx;
11917
11918 /* Get the driver's phba structure from the dev_id */
11919 fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
11920 phba = fcp_eq_hdl->phba;
11921 fcp_eqidx = fcp_eq_hdl->idx;
11922
11923 if (unlikely(!phba))
11924 return IRQ_NONE;
11925 if (unlikely(!phba->sli4_hba.hba_eq))
11926 return IRQ_NONE;
11927
11928 /* Get to the EQ struct associated with this vector */
11929 fpeq = phba->sli4_hba.hba_eq[fcp_eqidx];
11930 if (unlikely(!fpeq))
11931 return IRQ_NONE;
11932
11933 if (lpfc_fcp_look_ahead) {
11934 if (atomic_dec_and_test(&fcp_eq_hdl->fcp_eq_in_use))
11935 lpfc_sli4_eq_clr_intr(fpeq);
11936 else {
11937 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
11938 return IRQ_NONE;
11939 }
11940 }
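 /*
  * Look-ahead mode: fcp_eq_in_use (presumably initialized to 1) allows only
  * one context to poll this EQ at a time. A successful dec-and-test claims
  * the EQ and masks its interrupt; otherwise the count is restored and the
  * event is left for the active poller.
  */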
11941
11942 /* Check device state for handling interrupt */
11943 if (unlikely(lpfc_intr_state_check(phba))) {
11944 fpeq->EQ_badstate++;
11945 /* Check again for link_state with lock held */
11946 spin_lock_irqsave(&phba->hbalock, iflag);
11947 if (phba->link_state < LPFC_LINK_DOWN)
11948 /* Flush, clear interrupt, and rearm the EQ */
11949 lpfc_sli4_eq_flush(phba, fpeq);
11950 spin_unlock_irqrestore(&phba->hbalock, iflag);
11951 if (lpfc_fcp_look_ahead)
11952 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
11953 return IRQ_NONE;
11954 }
11955
11956 /*
11957 * Process all the event on FCP fast-path EQ
11958 */
11959 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
11960 lpfc_sli4_hba_handle_eqe(phba, eqe, fcp_eqidx);
11961 if (!(++ecount % fpeq->entry_repost))
11962 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
11963 fpeq->EQ_processed++;
11964 }
11965
11966 /* Track the max number of EQEs processed in 1 intr */
11967 if (ecount > fpeq->EQ_max_eqe)
11968 fpeq->EQ_max_eqe = ecount;
11969
11970 /* Always clear and re-arm the fast-path EQ */
11971 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
11972
11973 if (unlikely(ecount == 0)) {
11974 fpeq->EQ_no_entry++;
11975
11976 if (lpfc_fcp_look_ahead) {
11977 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
11978 return IRQ_NONE;
11979 }
11980
11981 if (phba->intr_type == MSIX)
11982 /* MSI-X treated interrupt served as no EQ share INT */
11983 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11984 "0358 MSI-X interrupt with no EQE\n");
11985 else
11986 /* Non MSI-X treated on interrupt as EQ share INT */
11987 return IRQ_NONE;
11988 }
11989
11990 if (lpfc_fcp_look_ahead)
11991 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
11992 return IRQ_HANDLED;
11993} /* lpfc_sli4_hba_intr_handler */
11994
11995/**
11996 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
11997 * @irq: Interrupt number.
11998 * @dev_id: The device context pointer.
11999 *
12000 * This function is the device-level interrupt handler to device with SLI-4
12001 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
12002 * interrupt mode is enabled and there is an event in the HBA which requires
12003 * driver attention. This function invokes the slow-path interrupt attention
12004 * handling function and fast-path interrupt attention handling function in
12005 * turn to process the relevant HBA attention events. This function is called
12006 * without any lock held. It gets the hbalock to access and update SLI data
12007 * structures.
12008 *
12009 * This function returns IRQ_HANDLED when interrupt is handled, else it
12010 * returns IRQ_NONE.
12011 **/
12012irqreturn_t
12013lpfc_sli4_intr_handler(int irq, void *dev_id)
12014{
12015 struct lpfc_hba *phba;
12016 irqreturn_t hba_irq_rc;
12017 bool hba_handled = false;
12018 int fcp_eqidx;
12019
12020 /* Get the driver's phba structure from the dev_id */
12021 phba = (struct lpfc_hba *)dev_id;
12022
12023 if (unlikely(!phba))
12024 return IRQ_NONE;
12025
12026 /*
12027 * Invoke fast-path host attention interrupt handling as appropriate.
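 * In MSI or Pin-IRQ mode there is a single interrupt vector, so this loop
 * must offer the event to every FCP EQ; with MSI-X each vector normally
 * enters lpfc_sli4_hba_intr_handler() directly for its own EQ.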
12028 */
12029 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
12030 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
12031 &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
12032 if (hba_irq_rc == IRQ_HANDLED)
12033 hba_handled |= true;
12034 }
12035
12036 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
12037} /* lpfc_sli4_intr_handler */
12038
12039/**
12040 * lpfc_sli4_queue_free - free a queue structure and associated memory
12041 * @queue: The queue structure to free.
12042 *
12043 * This function frees a queue structure and the DMAable memory used for
12044 * the host resident queue. This function must be called after destroying the
12045 * queue on the HBA.
12046 **/
12047void
12048lpfc_sli4_queue_free(struct lpfc_queue *queue)
12049{
12050 struct lpfc_dmabuf *dmabuf;
12051
12052 if (!queue)
12053 return;
12054
12055 while (!list_empty(&queue->page_list)) {
12056 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
12057 list);
12058 dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
12059 dmabuf->virt, dmabuf->phys);
12060 kfree(dmabuf);
12061 }
12062 kfree(queue);
12063 return;
12064}
12065
12066/**
12067 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
12068 * @phba: The HBA that this queue is being created on.
12069 * @entry_size: The size of each queue entry for this queue.
12070 * @entry_count: The number of entries that this queue will handle.
12071 *
12072 * This function allocates a queue structure and the DMAable memory used for
12073 * the host resident queue. This function must be called before creating the
12074 * queue on the HBA.
12075 **/
12076struct lpfc_queue *
12077lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
12078 uint32_t entry_count)
12079{
12080 struct lpfc_queue *queue;
12081 struct lpfc_dmabuf *dmabuf;
12082 int x, total_qe_count;
12083 void *dma_pointer;
12084 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12085
12086 if (!phba->sli4_hba.pc_sli4_params.supported)
12087 hw_page_size = SLI4_PAGE_SIZE;
12088
12089 queue = kzalloc(sizeof(struct lpfc_queue) +
12090 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
12091 if (!queue)
12092 return NULL;
12093 queue->page_count = (ALIGN(entry_size * entry_count,
12094 hw_page_size))/hw_page_size;
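 /*
  * ALIGN rounds the total entry footprint up to a whole hardware page,
  * i.e. page_count = DIV_ROUND_UP(entry_size * entry_count, hw_page_size).
  */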
12095 INIT_LIST_HEAD(&queue->list);
12096 INIT_LIST_HEAD(&queue->page_list);
12097 INIT_LIST_HEAD(&queue->child_list);
12098 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
12099 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
12100 if (!dmabuf)
12101 goto out_fail;
12102 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
12103 hw_page_size, &dmabuf->phys,
12104 GFP_KERNEL);
12105 if (!dmabuf->virt) {
12106 kfree(dmabuf);
12107 goto out_fail;
12108 }
12109 memset(dmabuf->virt, 0, hw_page_size);
12110 dmabuf->buffer_tag = x;
12111 list_add_tail(&dmabuf->list, &queue->page_list);
12112 /* initialize queue's entry array */
12113 dma_pointer = dmabuf->virt;
12114 for (; total_qe_count < entry_count &&
12115 dma_pointer < (hw_page_size + dmabuf->virt);
12116 total_qe_count++, dma_pointer += entry_size) {
12117 queue->qe[total_qe_count].address = dma_pointer;
12118 }
12119 }
12120 queue->entry_size = entry_size;
12121 queue->entry_count = entry_count;
12122
12123 /*
12124 * entry_repost is calculated based on the number of entries in the
12125 * queue. This works out except for RQs. If buffers are NOT initially
12126 * posted for every RQE, entry_repost should be adjusted accordingly.
12127 */
12128 queue->entry_repost = (entry_count >> 3);
12129 if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST)
12130 queue->entry_repost = LPFC_QUEUE_MIN_REPOST;
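 /*
  * Example: a 1024-entry CQ yields entry_repost = 128, so the doorbell is
  * rung (without re-arm) after every 128 processed entries.
  */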
12131 queue->phba = phba;
12132
12133 return queue;
12134out_fail:
12135 lpfc_sli4_queue_free(queue);
12136 return NULL;
12137}
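/*
 * Typical pairing (sketch only; the entry size/count values vary by queue
 * type and come from the SLI4 parameters elsewhere in the driver):
 *
 *	qdesc = lpfc_sli4_queue_alloc(phba, entry_size, entry_count);
 *	rc = lpfc_eq_create(phba, qdesc, imax);
 *	...
 *	lpfc_sli4_queue_free(qdesc);
 */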
12138
12139/**
12140 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
12141 * @phba: HBA structure that indicates port to create a queue on.
12142 * @pci_barset: PCI BAR set flag.
12143 *
12144 * This function maps the specified PCI BAR address into host memory, if it
12145 * has not already been mapped, and returns that address. The returned host
12146 * memory address can be NULL.
12147 */
12148static void __iomem *
12149lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
12150{
12151 struct pci_dev *pdev;
12152 unsigned long bar_map, bar_map_len;
12153
12154 if (!phba->pcidev)
12155 return NULL;
12156 else
12157 pdev = phba->pcidev;
12158
12159 switch (pci_barset) {
12160 case WQ_PCI_BAR_0_AND_1:
12161 if (!phba->pci_bar0_memmap_p) {
12162 bar_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
12163 bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
12164 phba->pci_bar0_memmap_p = ioremap(bar_map, bar_map_len);
12165 }
12166 return phba->pci_bar0_memmap_p;
12167 case WQ_PCI_BAR_2_AND_3:
12168 if (!phba->pci_bar2_memmap_p) {
12169 bar_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
12170 bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
12171 phba->pci_bar2_memmap_p = ioremap(bar_map, bar_map_len);
12172 }
12173 return phba->pci_bar2_memmap_p;
12174 case WQ_PCI_BAR_4_AND_5:
12175 if (!phba->pci_bar4_memmap_p) {
12176 bar_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
12177 bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
12178 phba->pci_bar4_memmap_p = ioremap(bar_map, bar_map_len);
12179 }
12180 return phba->pci_bar4_memmap_p;
12181 default:
12182 break;
12183 }
12184 return NULL;
12185}
12186
12187/**
12188 * lpfc_modify_fcp_eq_delay - Modify Delay Multiplier on FCP EQs
12189 * @phba: HBA structure that indicates port to create a queue on.
12190 * @startq: The starting FCP EQ to modify
12191 *
12192 * This function sends a MODIFY_EQ_DELAY mailbox command to the HBA.
12193 *
12194 * The @phba struct is used to send mailbox command to HBA. The @startq
12195 * is used to get the starting FCP EQ to change.
12196 * This function is synchronous and will wait for the mailbox
12197 * command to finish before continuing.
12198 *
12199 * On success this function will return zero. If unable to allocate enough
12200 * memory this function will return -ENOMEM. If the mailbox command
12201 * fails this function will return -ENXIO.
12202 **/
12203uint32_t
12204lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
12205{
12206 struct lpfc_mbx_modify_eq_delay *eq_delay;
12207 LPFC_MBOXQ_t *mbox;
12208 struct lpfc_queue *eq;
12209 int cnt, rc, length, status = 0;
12210 uint32_t shdr_status, shdr_add_status;
12211 uint32_t result;
12212 int fcp_eqidx;
12213 union lpfc_sli4_cfg_shdr *shdr;
12214 uint16_t dmult;
12215
12216 if (startq >= phba->cfg_fcp_io_channel)
12217 return 0;
12218
12219 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12220 if (!mbox)
12221 return -ENOMEM;
12222 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
12223 sizeof(struct lpfc_sli4_cfg_mhdr));
12224 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
12225 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
12226 length, LPFC_SLI4_MBX_EMBED);
12227 eq_delay = &mbox->u.mqe.un.eq_delay;
12228
12229 /* Calculate delay multiplier from maximum interrupts per second */
12230 result = phba->cfg_fcp_imax / phba->cfg_fcp_io_channel;
12231 if (result > LPFC_DMULT_CONST)
12232 dmult = 0;
12233 else
12234 dmult = LPFC_DMULT_CONST/result - 1;
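 /*
  * dmult maps the per-EQ interrupts-per-second target onto the hardware
  * delay-multiplier field; a target above LPFC_DMULT_CONST disables
  * coalescing altogether (dmult = 0).
  */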
12235
12236 cnt = 0;
12237 for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_io_channel;
12238 fcp_eqidx++) {
12239 eq = phba->sli4_hba.hba_eq[fcp_eqidx];
12240 if (!eq)
12241 continue;
12242 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
12243 eq_delay->u.request.eq[cnt].phase = 0;
12244 eq_delay->u.request.eq[cnt].delay_multi = dmult;
12245 cnt++;
12246 if (cnt >= LPFC_MAX_EQ_DELAY)
12247 break;
12248 }
12249 eq_delay->u.request.num_eq = cnt;
12250
12251 mbox->vport = phba->pport;
12252 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12253 mbox->context1 = NULL;
12254 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12255 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
12256 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12257 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12258 if (shdr_status || shdr_add_status || rc) {
12259 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12260 "2512 MODIFY_EQ_DELAY mailbox failed with "
12261 "status x%x add_status x%x, mbx status x%x\n",
12262 shdr_status, shdr_add_status, rc);
12263 status = -ENXIO;
12264 }
12265 mempool_free(mbox, phba->mbox_mem_pool);
12266 return status;
12267}
12268
12269/**
12270 * lpfc_eq_create - Create an Event Queue on the HBA
12271 * @phba: HBA structure that indicates port to create a queue on.
12272 * @eq: The queue structure to use to create the event queue.
12273 * @imax: The maximum interrupt per second limit.
12274 *
12275 * This function creates an event queue, as detailed in @eq, on a port,
12276 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
12277 *
12278 * The @phba struct is used to send mailbox command to HBA. The @eq struct
12279 * is used to get the entry count and entry size that are necessary to
12280 * determine the number of pages to allocate and use for this queue. This
12281 * function will send the EQ_CREATE mailbox command to the HBA to setup the
12282 * event queue. This function is synchronous and will wait for the mailbox
12283 * command to finish before continuing.
12284 *
12285 * On success this function will return zero. If unable to allocate enough
12286 * memory this function will return -ENOMEM. If the queue create mailbox command
12287 * fails this function will return -ENXIO.
12288 **/
12289uint32_t
12290lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
12291{
12292 struct lpfc_mbx_eq_create *eq_create;
12293 LPFC_MBOXQ_t *mbox;
12294 int rc, length, status = 0;
12295 struct lpfc_dmabuf *dmabuf;
12296 uint32_t shdr_status, shdr_add_status;
12297 union lpfc_sli4_cfg_shdr *shdr;
12298 uint16_t dmult;
12299 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12300
12301 /* sanity check on queue memory */
12302 if (!eq)
12303 return -ENODEV;
12304 if (!phba->sli4_hba.pc_sli4_params.supported)
12305 hw_page_size = SLI4_PAGE_SIZE;
12306
12307 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12308 if (!mbox)
12309 return -ENOMEM;
12310 length = (sizeof(struct lpfc_mbx_eq_create) -
12311 sizeof(struct lpfc_sli4_cfg_mhdr));
12312 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
12313 LPFC_MBOX_OPCODE_EQ_CREATE,
12314 length, LPFC_SLI4_MBX_EMBED);
12315 eq_create = &mbox->u.mqe.un.eq_create;
12316 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
12317 eq->page_count);
12318 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
12319 LPFC_EQE_SIZE);
12320 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
12321 /* Calculate delay multiplier from maximum interrupts per second */
12322 if (imax > LPFC_DMULT_CONST)
12323 dmult = 0;
12324 else
12325 dmult = LPFC_DMULT_CONST/imax - 1;
12326 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
12327 dmult);
12328 switch (eq->entry_count) {
12329 default:
12330 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12331 "0360 Unsupported EQ count. (%d)\n",
12332 eq->entry_count);
12333 if (eq->entry_count < 256)
12334 return -EINVAL;
12335 /* otherwise default to smallest count (drop through) */
12336 case 256:
12337 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
12338 LPFC_EQ_CNT_256);
12339 break;
12340 case 512:
12341 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
12342 LPFC_EQ_CNT_512);
12343 break;
12344 case 1024:
12345 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
12346 LPFC_EQ_CNT_1024);
12347 break;
12348 case 2048:
12349 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
12350 LPFC_EQ_CNT_2048);
12351 break;
12352 case 4096:
12353 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
12354 LPFC_EQ_CNT_4096);
12355 break;
12356 }
12357 list_for_each_entry(dmabuf, &eq->page_list, list) {
12358 memset(dmabuf->virt, 0, hw_page_size);
12359 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
12360 putPaddrLow(dmabuf->phys);
12361 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
12362 putPaddrHigh(dmabuf->phys);
12363 }
12364 mbox->vport = phba->pport;
12365 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12366 mbox->context1 = NULL;
12367 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12368 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
12369 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12370 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12371 if (shdr_status || shdr_add_status || rc) {
12372 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12373 "2500 EQ_CREATE mailbox failed with "
12374 "status x%x add_status x%x, mbx status x%x\n",
12375 shdr_status, shdr_add_status, rc);
12376 status = -ENXIO;
12377 }
12378 eq->type = LPFC_EQ;
12379 eq->subtype = LPFC_NONE;
12380 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
12381 if (eq->queue_id == 0xFFFF)
12382 status = -ENXIO;
12383 eq->host_index = 0;
12384 eq->hba_index = 0;
12385
12386 mempool_free(mbox, phba->mbox_mem_pool);
12387 return status;
12388}
12389
12390/**
12391 * lpfc_cq_create - Create a Completion Queue on the HBA
12392 * @phba: HBA structure that indicates port to create a queue on.
12393 * @cq: The queue structure to use to create the completion queue.
12394 * @eq: The event queue to bind this completion queue to.
12395 *
12396 * This function creates a completion queue, as detailed in @cq, on a port,
12397 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
12398 *
12399 * The @phba struct is used to send mailbox command to HBA. The @cq struct
12400 * is used to get the entry count and entry size that are necessary to
12401 * determine the number of pages to allocate and use for this queue. The @eq
12402 * is used to indicate which event queue to bind this completion queue to. This
12403 * function will send the CQ_CREATE mailbox command to the HBA to setup the
12404 * completion queue. This function is synchronous and will wait for the mailbox
12405 * command to finish before continuing.
12406 *
12407 * On success this function will return zero. If unable to allocate enough
12408 * memory this function will return -ENOMEM. If the queue create mailbox command
12409 * fails this function will return -ENXIO.
12410 **/
uint32_t
lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
	       struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
{
	struct lpfc_mbx_cq_create *cq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	if (!cq || !eq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_cq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_CQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	cq_create = &mbox->u.mqe.un.cq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
	bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
	       cq->page_count);
	bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
	bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.cqv);
	if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
		/* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */
		bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1);
		bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
		       eq->queue_id);
	} else {
		bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
		       eq->queue_id);
	}
	switch (cq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0361 Unsupported CQ count. (%d)\n",
				cq->entry_count);
		if (cq->entry_count < 256) {
			status = -EINVAL;
			goto out;
		}
		/* otherwise default to smallest count (drop through) */
	case 256:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_256);
		break;
	case 512:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_512);
		break;
	case 1024:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_1024);
		break;
	}
	list_for_each_entry(dmabuf, &cq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2501 CQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
	if (cq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	/* link the cq onto the parent eq child list */
	list_add_tail(&cq->list, &eq->child_list);
	/* Set up completion queue's type and subtype */
	cq->type = type;
	cq->subtype = subtype;
	cq->assoc_qid = eq->queue_id;
	cq->host_index = 0;
	cq->hba_index = 0;

out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}

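/*
 * Illustrative sketch (not part of the driver): creating a completion
 * queue bound to an existing event queue.  The entry size/count values
 * come from the cached SLI4 parameters; LPFC_WCQ/LPFC_FCP mirror how
 * the driver types its fast-path CQs elsewhere, and are assumptions
 * for the purpose of this example.
 */
#if 0	/* illustrative only */
static int example_setup_fcp_cq(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_queue *cq;

	cq = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
				   phba->sli4_hba.cq_ecount);
	if (!cq)
		return -ENOMEM;
	/* entry_count must be 256, 512 or 1024 for the V0 CQ_CREATE */
	return lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_FCP);
}
#endif
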
/**
 * lpfc_mq_create_fb_init - Send MQ_CREATE without async events registration
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
 * @cq: The completion queue to associate with this mailbox queue.
 *
 * This function provides failback (fb) functionality when the
 * mq_create_ext fails on older FW generations.  Its purpose is identical
 * to mq_create_ext otherwise.
 *
 * This routine cannot fail as all attributes were previously accessed and
 * initialized in mq_create_ext.
 **/
static void
lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
		       LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
{
	struct lpfc_mbx_mq_create *mq_create;
	struct lpfc_dmabuf *dmabuf;
	int length;

	length = (sizeof(struct lpfc_mbx_mq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	mq_create = &mbox->u.mqe.un.mq_create;
	bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
	       mq->page_count);
	bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
	switch (mq->entry_count) {
	case 16:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_16);
		break;
	case 32:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_32);
		break;
	case 64:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_64);
		break;
	case 128:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_128);
		break;
	}
	list_for_each_entry(dmabuf, &mq->page_list, list) {
		mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
			putPaddrLow(dmabuf->phys);
		mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
			putPaddrHigh(dmabuf->phys);
	}
}

/**
 * lpfc_mq_create - Create a mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @cq: The completion queue to associate with this mailbox queue.
 * @subtype: The queue's subtype.
 *
 * This function creates a mailbox queue, as detailed in @mq, on a port,
 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send a mailbox command to the HBA. The @mq
 * struct is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. This
 * function will send the MQ_CREATE mailbox command to the HBA to set up the
 * mailbox queue. This function is synchronous and will wait for the mailbox
 * command to finish before returning.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
int32_t
lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
	       struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_mq_create *mq_create;
	struct lpfc_mbx_mq_create_ext *mq_create_ext;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	if (!mq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_mq_create_ext) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
			 length, LPFC_SLI4_MBX_EMBED);

	mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
	shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
	bf_set(lpfc_mbx_mq_create_ext_num_pages,
	       &mq_create_ext->u.request, mq->page_count);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.mqv);
	if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
		bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
		       cq->queue_id);
	else
		bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
		       cq->queue_id);
	switch (mq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0362 Unsupported MQ count. (%d)\n",
				mq->entry_count);
		if (mq->entry_count < 16) {
			status = -EINVAL;
			goto out;
		}
		/* otherwise default to smallest count (drop through) */
	case 16:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_16);
		break;
	case 32:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_32);
		break;
	case 64:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_64);
		break;
	case 128:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_128);
		break;
	}
	list_for_each_entry(dmabuf, &mq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
			putPaddrLow(dmabuf->phys);
		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
			putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
			      &mq_create_ext->u.response);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2795 MQ_CREATE_EXT failed with "
				"status x%x. Failback to MQ_CREATE.\n",
				rc);
		lpfc_mq_create_fb_init(phba, mq, mbox, cq);
		mq_create = &mbox->u.mqe.un.mq_create;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
		shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
		mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
				      &mq_create->u.response);
	}

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2502 MQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	if (mq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	mq->type = LPFC_MQ;
	mq->assoc_qid = cq->queue_id;
	mq->subtype = subtype;
	mq->host_index = 0;
	mq->hba_index = 0;

	/* link the mq onto the parent cq child list */
	list_add_tail(&mq->list, &cq->child_list);
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}

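/*
 * Illustrative sketch (not part of the driver): creating the mailbox
 * queue against its dedicated completion queue.  The mq_esize/mq_ecount
 * fields mirror the cached SLI4 parameters the driver uses elsewhere;
 * their use here is an assumption for the example.
 */
#if 0	/* illustrative only */
static int example_setup_mq(struct lpfc_hba *phba, struct lpfc_queue *mbx_cq)
{
	struct lpfc_queue *mq;

	mq = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
				   phba->sli4_hba.mq_ecount);
	if (!mq)
		return -ENOMEM;
	/* MQ_CREATE_EXT is tried first; older firmware falls back
	 * to plain MQ_CREATE via lpfc_mq_create_fb_init().
	 */
	return lpfc_mq_create(phba, mq, mbx_cq, LPFC_MBOX);
}
#endif
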
/**
 * lpfc_wq_create - Create a Work Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @wq: The queue structure to use to create the work queue.
 * @cq: The completion queue to bind this work queue to.
 * @subtype: The subtype of the work queue indicating its functionality.
 *
 * This function creates a work queue, as detailed in @wq, on a port, described
 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send a mailbox command to the HBA. The @wq
 * struct is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. The @cq
 * is used to indicate which completion queue to bind this work queue to. This
 * function will send the WQ_CREATE mailbox command to the HBA to set up the
 * work queue. This function is synchronous and will wait for the mailbox
 * command to finish before returning.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
uint32_t
lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
	       struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_wq_create *wq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
	struct dma_address *page;
	void __iomem *bar_memmap_p;
	uint32_t db_offset;
	uint16_t pci_barset;

	/* sanity check on queue memory */
	if (!wq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_wq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	wq_create = &mbox->u.mqe.un.wq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
	bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
	       wq->page_count);
	bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
	       cq->queue_id);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.wqv);

	if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) {
		bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
		       wq->entry_count);
		switch (wq->entry_size) {
		default:
		case 64:
			bf_set(lpfc_mbx_wq_create_wqe_size,
			       &wq_create->u.request_1,
			       LPFC_WQ_WQE_SIZE_64);
			break;
		case 128:
			bf_set(lpfc_mbx_wq_create_wqe_size,
			       &wq_create->u.request_1,
			       LPFC_WQ_WQE_SIZE_128);
			break;
		}
		bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1,
		       (PAGE_SIZE/SLI4_PAGE_SIZE));
		page = wq_create->u.request_1.page;
	} else {
		page = wq_create->u.request.page;
	}
	list_for_each_entry(dmabuf, &wq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
		page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
	}

	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
		bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2503 WQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
	if (wq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
		wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
				       &wq_create->u.response);
		if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
		    (wq->db_format != LPFC_DB_RING_FORMAT)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3265 WQ[%d] doorbell format not "
					"supported: x%x\n", wq->queue_id,
					wq->db_format);
			status = -EINVAL;
			goto out;
		}
		pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
				    &wq_create->u.response);
		bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
		if (!bar_memmap_p) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3263 WQ[%d] failed to memmap pci "
					"barset:x%x\n", wq->queue_id,
					pci_barset);
			status = -ENOMEM;
			goto out;
		}
		db_offset = wq_create->u.response.doorbell_offset;
		if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
		    (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3252 WQ[%d] doorbell offset not "
					"supported: x%x\n", wq->queue_id,
					db_offset);
			status = -EINVAL;
			goto out;
		}
		wq->db_regaddr = bar_memmap_p + db_offset;
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3264 WQ[%d]: barset:x%x, offset:x%x\n",
				wq->queue_id, pci_barset, db_offset);
	} else {
		wq->db_format = LPFC_DB_LIST_FORMAT;
		wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
	}
	wq->type = LPFC_WQ;
	wq->assoc_qid = cq->queue_id;
	wq->subtype = subtype;
	wq->host_index = 0;
	wq->hba_index = 0;
	wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;

	/* link the wq onto the parent cq child list */
	list_add_tail(&wq->list, &cq->child_list);
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}

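/*
 * Illustrative sketch (not part of the driver): pairing a work queue
 * with the completion queue that will absorb its completions.  The
 * wq_esize/wq_ecount fields mirror the cached SLI4 parameters; their
 * use here is an assumption for the example.
 */
#if 0	/* illustrative only */
static int example_setup_els_wq(struct lpfc_hba *phba,
				struct lpfc_queue *els_cq)
{
	struct lpfc_queue *wq;

	wq = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
				   phba->sli4_hba.wq_ecount);
	if (!wq)
		return -ENOMEM;
	return lpfc_wq_create(phba, wq, els_cq, LPFC_ELS);
}
#endif
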
/**
 * lpfc_rq_adjust_repost - Adjust entry_repost for an RQ
 * @phba: HBA structure that indicates port to create a queue on.
 * @rq: The queue structure to use for the receive queue.
 * @qno: The associated HBQ number
 *
 * For SLI4 we need to adjust the RQ repost value based on
 * the number of buffers that are initially posted to the RQ.
 */
void
lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
{
	uint32_t cnt;

	/* sanity check on queue memory */
	if (!rq)
		return;
	cnt = lpfc_hbq_defs[qno]->entry_count;

	/* Recalc repost for RQs based on buffers initially posted */
	cnt = (cnt >> 3);
	if (cnt < LPFC_QUEUE_MIN_REPOST)
		cnt = LPFC_QUEUE_MIN_REPOST;

	rq->entry_repost = cnt;
}

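/*
 * Worked example for the repost calculation above (illustrative only):
 * with a 512-entry HBQ definition, cnt >> 3 yields 64, which is kept
 * as long as it does not drop below LPFC_QUEUE_MIN_REPOST.
 */
#if 0	/* illustrative only */
static void example_rq_repost(struct lpfc_hba *phba, struct lpfc_queue *rq)
{
	/* e.g. lpfc_hbq_defs[qno]->entry_count == 512 -> entry_repost == 64 */
	lpfc_rq_adjust_repost(phba, rq, LPFC_ELS_HBQ);
}
#endif
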
/**
 * lpfc_rq_create - Create a Receive Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @hrq: The queue structure to use to create the header receive queue.
 * @drq: The queue structure to use to create the data receive queue.
 * @cq: The completion queue to bind these receive queues to.
 * @subtype: The queues' subtype.
 *
 * This function creates a receive buffer queue pair, as detailed in @hrq and
 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
 * to the HBA.
 *
 * The @phba struct is used to send a mailbox command to the HBA. The @hrq and
 * @drq structs are used to get the entry count that is necessary to determine
 * the number of pages to use for this queue. The @cq is used to indicate which
 * completion queue to bind received buffers that are posted to these queues to.
 * This function will send the RQ_CREATE mailbox command to the HBA to set up
 * the receive queue pair. This function is synchronous and will wait for the
 * mailbox command to finish before returning.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
uint32_t
lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
	       struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_rq_create *rq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
	void __iomem *bar_memmap_p;
	uint32_t db_offset;
	uint16_t pci_barset;

	/* sanity check on queue memory */
	if (!hrq || !drq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	if (hrq->entry_count != drq->entry_count)
		return -EINVAL;
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_rq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	rq_create = &mbox->u.mqe.un.rq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.rqv);
	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
		bf_set(lpfc_rq_context_rqe_count_1,
		       &rq_create->u.request.context,
		       hrq->entry_count);
		rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
		bf_set(lpfc_rq_context_rqe_size,
		       &rq_create->u.request.context,
		       LPFC_RQE_SIZE_8);
		bf_set(lpfc_rq_context_page_size,
		       &rq_create->u.request.context,
		       (PAGE_SIZE/SLI4_PAGE_SIZE));
	} else {
		switch (hrq->entry_count) {
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2535 Unsupported RQ count. (%d)\n",
					hrq->entry_count);
			if (hrq->entry_count < 512) {
				status = -EINVAL;
				goto out;
			}
			/* otherwise default to smallest count (drop through) */
		case 512:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_512);
			break;
		case 1024:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_1024);
			break;
		case 2048:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_2048);
			break;
		case 4096:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_4096);
			break;
		}
		bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
		       LPFC_HDR_BUF_SIZE);
	}
	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
	       hrq->page_count);
	list_for_each_entry(dmabuf, &hrq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2504 RQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (hrq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}

	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
		hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
					&rq_create->u.response);
		if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
		    (hrq->db_format != LPFC_DB_RING_FORMAT)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3262 RQ [%d] doorbell format not "
					"supported: x%x\n", hrq->queue_id,
					hrq->db_format);
			status = -EINVAL;
			goto out;
		}

		pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
				    &rq_create->u.response);
		bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
		if (!bar_memmap_p) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3269 RQ[%d] failed to memmap pci "
					"barset:x%x\n", hrq->queue_id,
					pci_barset);
			status = -ENOMEM;
			goto out;
		}

		db_offset = rq_create->u.response.doorbell_offset;
		if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
		    (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3270 RQ[%d] doorbell offset not "
					"supported: x%x\n", hrq->queue_id,
					db_offset);
			status = -EINVAL;
			goto out;
		}
		hrq->db_regaddr = bar_memmap_p + db_offset;
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3266 RQ[qid:%d]: barset:x%x, offset:x%x\n",
				hrq->queue_id, pci_barset, db_offset);
	} else {
		hrq->db_format = LPFC_DB_RING_FORMAT;
		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
	}
	hrq->type = LPFC_HRQ;
	hrq->assoc_qid = cq->queue_id;
	hrq->subtype = subtype;
	hrq->host_index = 0;
	hrq->hba_index = 0;

	/* now create the data queue */
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.rqv);
	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
		bf_set(lpfc_rq_context_rqe_count_1,
		       &rq_create->u.request.context, hrq->entry_count);
		rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE;
		bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
		       LPFC_RQE_SIZE_8);
		bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
		       (PAGE_SIZE/SLI4_PAGE_SIZE));
	} else {
		switch (drq->entry_count) {
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2536 Unsupported RQ count. (%d)\n",
					drq->entry_count);
			if (drq->entry_count < 512) {
				status = -EINVAL;
				goto out;
			}
			/* otherwise default to smallest count (drop through) */
		case 512:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_512);
			break;
		case 1024:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_1024);
			break;
		case 2048:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_2048);
			break;
		case 4096:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_4096);
			break;
		}
		bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
		       LPFC_DATA_BUF_SIZE);
	}
	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
	       drq->page_count);
	list_for_each_entry(dmabuf, &drq->page_list, list) {
		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		status = -ENXIO;
		goto out;
	}
	drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (drq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	drq->type = LPFC_DRQ;
	drq->assoc_qid = cq->queue_id;
	drq->subtype = subtype;
	drq->host_index = 0;
	drq->hba_index = 0;

	/* link the header and data RQs onto the parent cq child list */
	list_add_tail(&hrq->list, &cq->child_list);
	list_add_tail(&drq->list, &cq->child_list);

out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}

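/*
 * Illustrative sketch (not part of the driver): the header/data RQ pair
 * must be allocated with matching entry counts before lpfc_rq_create()
 * is called.  The rq_esize/rq_ecount fields mirror the cached SLI4
 * parameters used by the driver's own setup path and are an assumption
 * for this example.
 */
#if 0	/* illustrative only */
static int example_setup_rq_pair(struct lpfc_hba *phba, struct lpfc_queue *cq)
{
	struct lpfc_queue *hrq, *drq;

	hrq = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
				    phba->sli4_hba.rq_ecount);
	drq = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
				    phba->sli4_hba.rq_ecount);
	if (!hrq || !drq)
		return -ENOMEM;
	/* hrq->entry_count must equal drq->entry_count */
	return lpfc_rq_create(phba, hrq, drq, cq, LPFC_USOL);
}
#endif
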
/**
 * lpfc_eq_destroy - Destroy an Event Queue on the HBA
 * @phba: HBA structure that indicates port the queue was created on.
 * @eq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @eq by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @eq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
uint32_t
lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!eq)
		return -ENODEV;
	mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_eq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_EQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
	       eq->queue_id);
	mbox->vport = eq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;

	rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2505 EQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}

	/* Remove eq from any list */
	list_del_init(&eq->list);
	mempool_free(mbox, eq->phba->mbox_mem_pool);
	return status;
}

/**
 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
 * @phba: HBA structure that indicates port the queue was created on.
 * @cq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @cq by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @cq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
uint32_t
lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!cq)
		return -ENODEV;
	mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_cq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_CQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
	       cq->queue_id);
	mbox->vport = cq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.wq_create.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2506 CQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Remove cq from any list */
	list_del_init(&cq->list);
	mempool_free(mbox, cq->phba->mbox_mem_pool);
	return status;
}

/**
 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
 * @phba: HBA structure that indicates port the queue was created on.
 * @mq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @mq by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @mq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
uint32_t
lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!mq)
		return -ENODEV;
	mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_mq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
	       mq->queue_id);
	mbox->vport = mq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2507 MQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Remove mq from any list */
	list_del_init(&mq->list);
	mempool_free(mbox, mq->phba->mbox_mem_pool);
	return status;
}

/**
 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
 * @phba: HBA structure that indicates port the queue was created on.
 * @wq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @wq by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @wq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
uint32_t
lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!wq)
		return -ENODEV;
	mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_wq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
	       wq->queue_id);
	mbox->vport = wq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2508 WQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Remove wq from any list */
	list_del_init(&wq->list);
	mempool_free(mbox, wq->phba->mbox_mem_pool);
	return status;
}

/**
 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
 * @phba: HBA structure that indicates port the queues were created on.
 * @hrq: The header receive queue structure to destroy.
 * @drq: The data receive queue structure to destroy.
 *
 * This function destroys the receive queue pair, as detailed in @hrq and
 * @drq, by sending a mailbox command, specific to the type of queue, to the
 * HBA.
 *
 * The @hrq and @drq structs are used to get the queue IDs of the queues to
 * destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
uint32_t
lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
		struct lpfc_queue *drq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!hrq || !drq)
		return -ENODEV;
	mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_rq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       hrq->queue_id);
	mbox->vport = hrq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2509 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, hrq->phba->mbox_mem_pool);
		return -ENXIO;
	}
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       drq->queue_id);
	rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2510 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	list_del_init(&hrq->list);
	list_del_init(&drq->list);
	mempool_free(mbox, hrq->phba->mbox_mem_pool);
	return status;
}

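/*
 * Illustrative sketch (not part of the driver): queues are torn down
 * leaf-first so that no child queue outlives the CQ/EQ it is linked
 * under on the child lists maintained by the create routines.  The
 * unconditional ordering shown here is an assumption for illustration.
 */
#if 0	/* illustrative only */
static void example_teardown(struct lpfc_hba *phba, struct lpfc_queue *eq,
			     struct lpfc_queue *cq, struct lpfc_queue *hrq,
			     struct lpfc_queue *drq)
{
	lpfc_rq_destroy(phba, hrq, drq);
	lpfc_cq_destroy(phba, cq);
	lpfc_eq_destroy(phba, eq);
}
#endif
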
/**
 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
 * @phba: pointer to lpfc hba data structure.
 * @pdma_phys_addr0: Physical address of the 1st SGL page.
 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
 * @xritag: the xritag that ties this io to the SGL pages.
 *
 * This routine will post the sgl pages for the IO that has the xritag
 * that is in the iocbq structure. The xritag is assigned during iocbq
 * creation and persists for as long as the driver is loaded.
 * If the caller has fewer than 256 scatter gather segments to map then
 * pdma_phys_addr1 should be 0.
 * If the caller needs to map more than 256 scatter gather segments then
 * pdma_phys_addr1 should be a valid physical address.
 * Physical addresses for SGLs must be 64 byte aligned.
 * If you are going to map 2 SGLs then the first one must have 256 entries
 * and the second one can have between 1 and 256 entries.
 *
 * Return codes:
 *	0 - Success
 *	-EINVAL, -ENOMEM, -ENXIO - Failure
 **/
int
lpfc_sli4_post_sgl(struct lpfc_hba *phba,
		   dma_addr_t pdma_phys_addr0,
		   dma_addr_t pdma_phys_addr1,
		   uint16_t xritag)
{
	struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
	LPFC_MBOXQ_t *mbox;
	int rc;
	uint32_t shdr_status, shdr_add_status;
	uint32_t mbox_tmo;
	union lpfc_sli4_cfg_shdr *shdr;

	if (xritag == NO_XRI) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0364 Invalid param:\n");
		return -EINVAL;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
			 sizeof(struct lpfc_mbx_post_sgl_pages) -
			 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
				&mbox->u.mqe.un.post_sgl_pages;
	bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
	bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo	=
				cpu_to_le32(putPaddrLow(pdma_phys_addr0));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(pdma_phys_addr0));

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo	=
				cpu_to_le32(putPaddrLow(pdma_phys_addr1));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2511 POST_SGL mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}

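/*
 * Illustrative sketch (not part of the driver): posting a single-page
 * SGL for an XRI.  With fewer than 256 scatter/gather entries the
 * second page address is passed as 0, per the routine's contract above.
 */
#if 0	/* illustrative only */
static int example_post_one_sgl(struct lpfc_hba *phba,
				struct lpfc_sglq *sglq_entry)
{
	return lpfc_sli4_post_sgl(phba, sglq_entry->phys, 0,
				  sglq_entry->sli4_xritag);
}
#endif
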
/**
 * lpfc_sli4_alloc_xri - Allocate an available XRI from the device's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the next available logical XRI
 * from the bitmask the driver maintains, consistent with the SLI-4
 * interface spec. Because the index is logical, the driver starts the
 * search at 0 each time.
 *
 * Returns
 * The allocated logical xri (0 <= xri < max_xri) if successful
 * NO_XRI if no XRIs are available.
 **/
uint16_t
lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
{
	unsigned long xri;

	/*
	 * Fetch the next logical xri.  Because this index is logical,
	 * the driver starts at 0 each time.
	 */
	spin_lock_irq(&phba->hbalock);
	xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
				 phba->sli4_hba.max_cfg_param.max_xri, 0);
	if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
		spin_unlock_irq(&phba->hbalock);
		return NO_XRI;
	} else {
		set_bit(xri, phba->sli4_hba.xri_bmask);
		phba->sli4_hba.max_cfg_param.xri_used++;
	}
	spin_unlock_irq(&phba->hbalock);
	return xri;
}

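/*
 * Illustrative sketch (not part of the driver): an XRI taken from the
 * bitmask allocator must be returned with lpfc_sli4_free_xri() (or the
 * __ variant while already holding hbalock) once the I/O completes.
 */
#if 0	/* illustrative only */
static void example_xri_cycle(struct lpfc_hba *phba)
{
	uint16_t xri = lpfc_sli4_alloc_xri(phba);

	if (xri == NO_XRI)
		return;		/* pool exhausted */
	/* ... use the xri for an I/O ... */
	lpfc_sli4_free_xri(phba, xri);
}
#endif
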
/**
 * __lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: the xri to release.
 *
 * This routine is invoked to release an xri to the pool of
 * available XRIs maintained by the driver. The caller is expected
 * to hold the hbalock.
 **/
void
__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
		phba->sli4_hba.max_cfg_param.xri_used--;
	}
}

/**
 * lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: the xri to release.
 *
 * This routine is invoked to release an xri to the pool of
 * available XRIs maintained by the driver.
 **/
void
lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_xri(phba, xri);
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_next_xritag - Get an xritag for the io
 * @phba: Pointer to HBA context object.
 *
 * This function gets an xritag for the iocb. If there is no unused xritag
 * it will log a warning and return NO_XRI (0xffff); otherwise it returns
 * the newly allocated xritag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli4_next_xritag(struct lpfc_hba *phba)
{
	uint16_t xri_index;

	xri_index = lpfc_sli4_alloc_xri(phba);
	if (xri_index == NO_XRI)
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2004 Failed to allocate XRI.last XRITAG is %d"
				" Max XRI is %d, Used XRI is %d\n",
				xri_index,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.xri_used);
	return xri_index;
}

/**
 * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port.
 * @phba: pointer to lpfc hba data structure.
 * @post_sgl_list: pointer to els sgl entry list.
 * @post_cnt: number of els sgl entries on the list.
 *
 * This routine is invoked to post a block of driver's sgl pages to the
 * HBA using non-embedded mailbox command. No Lock is held. This routine
 * is only called when the driver is loading and after all IO has been
 * stopped.
 **/
static int
lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba,
			    struct list_head *post_sgl_list,
			    int post_cnt)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	reqlen = phba->sli4_hba.els_xri_cnt * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2559 Block sgl registration required DMA "
				"size (%d) great than a page\n", reqlen);
		return -ENOMEM;
	}
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
			 LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0285 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}
	/* Set up the SGL pages in the non-embedded DMA pages */
	viraddr = mbox->sge_array->addr[0];
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	pg_pairs = 0;
	list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
		/* Set up the sge entry */
		sgl_pg_pairs->sgl_pg0_addr_lo =
				cpu_to_le32(putPaddrLow(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg1_addr_lo =
				cpu_to_le32(putPaddrLow(0));
		sgl_pg_pairs->sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(0));

		/* Keep the first xritag on the list */
		if (pg_pairs == 0)
			xritag_start = sglq_entry->sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}

	/* Complete initialization and perform endian conversion. */
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, phba->sli4_hba.els_xri_cnt);
	sgl->word0 = cpu_to_le32(sgl->word0);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2513 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}

13765/**
13766 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
13767 * @phba: pointer to lpfc hba data structure.
13768 * @sblist: pointer to scsi buffer list.
13769 * @count: number of scsi buffers on the list.
13770 *
13771 * This routine is invoked to post a block of @count scsi sgl pages from a
13772 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
13773 * No Lock is held.
13774 *
13775 **/
13776int
8a9d2e80
JS
13777lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
13778 struct list_head *sblist,
13779 int count)
4f774513
JS
13780{
13781 struct lpfc_scsi_buf *psb;
13782 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
13783 struct sgl_page_pairs *sgl_pg_pairs;
13784 void *viraddr;
13785 LPFC_MBOXQ_t *mbox;
13786 uint32_t reqlen, alloclen, pg_pairs;
13787 uint32_t mbox_tmo;
13788 uint16_t xritag_start = 0;
13789 int rc = 0;
13790 uint32_t shdr_status, shdr_add_status;
13791 dma_addr_t pdma_phys_bpl1;
13792 union lpfc_sli4_cfg_shdr *shdr;
13793
13794 /* Calculate the requested length of the dma memory */
8a9d2e80 13795 reqlen = count * sizeof(struct sgl_page_pairs) +
4f774513 13796 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
49198b37 13797 if (reqlen > SLI4_PAGE_SIZE) {
4f774513
JS
13798 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13799 "0217 Block sgl registration required DMA "
13800 "size (%d) great than a page\n", reqlen);
13801 return -ENOMEM;
13802 }
13803 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13804 if (!mbox) {
13805 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13806 "0283 Failed to allocate mbox cmd memory\n");
13807 return -ENOMEM;
13808 }
13809
13810 /* Allocate DMA memory and set up the non-embedded mailbox command */
13811 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13812 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
13813 LPFC_SLI4_MBX_NEMBED);
13814
13815 if (alloclen < reqlen) {
13816 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13817 "2561 Allocated DMA memory size (%d) is "
13818 "less than the requested DMA memory "
13819 "size (%d)\n", alloclen, reqlen);
13820 lpfc_sli4_mbox_cmd_free(phba, mbox);
13821 return -ENOMEM;
13822 }
6d368e53 13823
4f774513 13824 /* Get the first SGE entry from the non-embedded DMA memory */
4f774513
JS
13825 viraddr = mbox->sge_array->addr[0];
13826
13827 /* Set up the SGL pages in the non-embedded DMA pages */
13828 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
13829 sgl_pg_pairs = &sgl->sgl_pg_pairs;
13830
13831 pg_pairs = 0;
13832 list_for_each_entry(psb, sblist, list) {
13833 /* Set up the sge entry */
13834 sgl_pg_pairs->sgl_pg0_addr_lo =
13835 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
13836 sgl_pg_pairs->sgl_pg0_addr_hi =
13837 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
13838 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
13839 pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
13840 else
13841 pdma_phys_bpl1 = 0;
13842 sgl_pg_pairs->sgl_pg1_addr_lo =
13843 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
13844 sgl_pg_pairs->sgl_pg1_addr_hi =
13845 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
13846 /* Keep the first xritag on the list */
13847 if (pg_pairs == 0)
13848 xritag_start = psb->cur_iocbq.sli4_xritag;
13849 sgl_pg_pairs++;
13850 pg_pairs++;
13851 }
13852 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
13853 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
13854 /* Perform endian conversion if necessary */
13855 sgl->word0 = cpu_to_le32(sgl->word0);
13856
13857 if (!phba->sli4_hba.intr_enable)
13858 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13859 else {
a183a15f 13860 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
13861 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
13862 }
13863 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
13864 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13865 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13866 if (rc != MBX_TIMEOUT)
13867 lpfc_sli4_mbox_cmd_free(phba, mbox);
13868 if (shdr_status || shdr_add_status || rc) {
13869 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13870 "2564 POST_SGL_BLOCK mailbox command failed "
13871 "status x%x add_status x%x mbx status x%x\n",
13872 shdr_status, shdr_add_status, rc);
13873 rc = -ENXIO;
13874 }
13875 return rc;
13876}
13877
13878/**
13879 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
13880 * @phba: pointer to lpfc_hba struct that the frame was received on
13881 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
13882 *
13883 * This function checks the fields in the @fc_hdr to see if the FC frame is a
13884 * valid type of frame that the LPFC driver will handle. This function will
13885 * return a zero if the frame is a valid frame or a non zero value when the
13886 * frame does not pass the check.
13887 **/
13888static int
13889lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
13890{
13891 /* make rctl_names static to save stack space */
13892 static char *rctl_names[] = FC_RCTL_NAMES_INIT;
13893 char *type_names[] = FC_TYPE_NAMES_INIT;
13894 struct fc_vft_header *fc_vft_hdr;
546fc854 13895 uint32_t *header = (uint32_t *) fc_hdr;
13896
13897 switch (fc_hdr->fh_r_ctl) {
13898 case FC_RCTL_DD_UNCAT: /* uncategorized information */
13899 case FC_RCTL_DD_SOL_DATA: /* solicited data */
13900 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
13901 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
13902 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
13903 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
13904 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
13905 case FC_RCTL_DD_CMD_STATUS: /* command status */
13906 case FC_RCTL_ELS_REQ: /* extended link services request */
13907 case FC_RCTL_ELS_REP: /* extended link services reply */
13908 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
13909 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
13910 case FC_RCTL_BA_NOP: /* basic link service NOP */
13911 case FC_RCTL_BA_ABTS: /* basic link service abort */
13912 case FC_RCTL_BA_RMC: /* remove connection */
13913 case FC_RCTL_BA_ACC: /* basic accept */
13914 case FC_RCTL_BA_RJT: /* basic reject */
13915 case FC_RCTL_BA_PRMT:
13916 case FC_RCTL_ACK_1: /* acknowledge_1 */
13917 case FC_RCTL_ACK_0: /* acknowledge_0 */
13918 case FC_RCTL_P_RJT: /* port reject */
13919 case FC_RCTL_F_RJT: /* fabric reject */
13920 case FC_RCTL_P_BSY: /* port busy */
13921 case FC_RCTL_F_BSY: /* fabric busy to data frame */
13922 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
13923 case FC_RCTL_LCR: /* link credit reset */
13924 case FC_RCTL_END: /* end */
13925 break;
13926 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
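		/*
		 * For a VFT-tagged frame the real FC header immediately
		 * follows the VFT header, so step past the tag and re-run
		 * these checks on the encapsulated header.
		 */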
13927 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
13928 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
13929 return lpfc_fc_frame_check(phba, fc_hdr);
13930 default:
13931 goto drop;
13932 }
13933 switch (fc_hdr->fh_type) {
13934 case FC_TYPE_BLS:
13935 case FC_TYPE_ELS:
13936 case FC_TYPE_FCP:
13937 case FC_TYPE_CT:
13938 break;
13939 case FC_TYPE_IP:
13940 case FC_TYPE_ILS:
13941 default:
13942 goto drop;
13943 }
546fc854 13944
4f774513 13945 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
13946 "2538 Received frame rctl:%s type:%s "
13947 "Frame Data:%08x %08x %08x %08x %08x %08x\n",
4f774513 13948 rctl_names[fc_hdr->fh_r_ctl],
13949 type_names[fc_hdr->fh_type],
13950 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
13951 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
13952 be32_to_cpu(header[4]), be32_to_cpu(header[5]));
13953 return 0;
13954drop:
13955 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
13956 "2539 Dropped frame rctl:%s type:%s\n",
13957 rctl_names[fc_hdr->fh_r_ctl],
13958 type_names[fc_hdr->fh_type]);
13959 return 1;
13960}
13961
13962/**
13963 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
13964 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
13965 *
13966 * This function processes the FC header to retrieve the VFI from the VF
13967 * header, if one exists. This function will return the VFI if one exists
13968 * or 0 if no VSAN Header exists.
13969 **/
13970static uint32_t
13971lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
13972{
13973 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
13974
13975 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
13976 return 0;
13977 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
13978}
13979
13980/**
13981 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
13982 * @phba: Pointer to the HBA structure to search for the vport on
13983 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
13984 * @fcfi: The FC Fabric ID that the frame came from
13985 *
13986 * This function searches the @phba for a vport that matches the content of the
13987 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
13988 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
13989 * returns the matching vport pointer or NULL if unable to match frame to a
13990 * vport.
13991 **/
13992static struct lpfc_vport *
13993lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
13994 uint16_t fcfi)
13995{
13996 struct lpfc_vport **vports;
13997 struct lpfc_vport *vport = NULL;
13998 int i;
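	/*
	 * The D_ID is a 24-bit address assembled from the three bytes of
	 * fh_d_id, most significant byte first.
	 */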
13999 uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
14000 fc_hdr->fh_d_id[1] << 8 |
14001 fc_hdr->fh_d_id[2]);
939723a4 14002
14003 if (did == Fabric_DID)
14004 return phba->pport;
14005 if ((phba->pport->fc_flag & FC_PT2PT) &&
14006 !(phba->link_state == LPFC_HBA_READY))
14007 return phba->pport;
14008
14009 vports = lpfc_create_vport_work_array(phba);
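	/* A frame maps to a vport only when FCFI, VFI and D_ID all match. */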
14010 if (vports != NULL)
14011 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
14012 if (phba->fcf.fcfi == fcfi &&
14013 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
14014 vports[i]->fc_myDID == did) {
14015 vport = vports[i];
14016 break;
14017 }
14018 }
14019 lpfc_destroy_vport_work_array(phba, vports);
14020 return vport;
14021}
14022
14023/**
14024 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
14025 * @vport: The vport to work on.
14026 *
14027 * This function updates the receive sequence time stamp for this vport. The
14028 * receive sequence time stamp records the time at which the last frame of
14029 * the sequence that has been idle the longest was received.
14030 * The driver uses this time stamp to determine if any received sequences have
14031 * timed out.
14032 **/
14033void
14034lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
14035{
14036 struct lpfc_dmabuf *h_buf;
14037 struct hbq_dmabuf *dmabuf = NULL;
14038
14039 /* get the oldest sequence on the rcv list */
14040 h_buf = list_get_first(&vport->rcv_buffer_list,
14041 struct lpfc_dmabuf, list);
14042 if (!h_buf)
14043 return;
14044 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
14045 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
14046}
14047
14048/**
14049 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
14050 * @vport: The vport that the received sequences were sent to.
14051 *
14052 * This function cleans up all outstanding received sequences. This is called
14053 * by the driver when a link event or user action invalidates all the received
14054 * sequences.
14055 **/
14056void
14057lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
14058{
14059 struct lpfc_dmabuf *h_buf, *hnext;
14060 struct lpfc_dmabuf *d_buf, *dnext;
14061 struct hbq_dmabuf *dmabuf = NULL;
14062
14063 /* start with the oldest sequence on the rcv list */
14064 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
14065 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
14066 list_del_init(&dmabuf->hbuf.list);
14067 list_for_each_entry_safe(d_buf, dnext,
14068 &dmabuf->dbuf.list, list) {
14069 list_del_init(&d_buf->list);
14070 lpfc_in_buf_free(vport->phba, d_buf);
14071 }
14072 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
14073 }
14074}
14075
14076/**
14077 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
14078 * @vport: The vport that the received sequences were sent to.
14079 *
14080 * This function determines whether any received sequences have timed out by
14081 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
14082 * indicates that there is at least one timed out sequence this routine will
14083 * go through the received sequences one at a time from most inactive to most
14084 * active to determine which ones need to be cleaned up. Once it has determined
14085 * that a sequence needs to be cleaned up it will simply free up the resources
14086 * without sending an abort.
14087 **/
14088void
14089lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
14090{
14091 struct lpfc_dmabuf *h_buf, *hnext;
14092 struct lpfc_dmabuf *d_buf, *dnext;
14093 struct hbq_dmabuf *dmabuf = NULL;
14094 unsigned long timeout;
14095 int abort_count = 0;
14096
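	/*
	 * fc_edtov holds the fabric E_D_TOV in milliseconds; a sequence is
	 * treated as timed out once that much time has passed since its
	 * most recent frame arrived.
	 */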
14097 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
14098 vport->rcv_buffer_time_stamp);
14099 if (list_empty(&vport->rcv_buffer_list) ||
14100 time_before(jiffies, timeout))
14101 return;
14102 /* start with the oldest sequence on the rcv list */
14103 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
14104 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
14105 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
14106 dmabuf->time_stamp);
14107 if (time_before(jiffies, timeout))
14108 break;
14109 abort_count++;
14110 list_del_init(&dmabuf->hbuf.list);
14111 list_for_each_entry_safe(d_buf, dnext,
14112 &dmabuf->dbuf.list, list) {
14113 list_del_init(&d_buf->list);
14114 lpfc_in_buf_free(vport->phba, d_buf);
14115 }
14116 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
14117 }
14118 if (abort_count)
14119 lpfc_update_rcv_time_stamp(vport);
14120}
14121
14122/**
14123 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
14124 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
14125 *
14126 * This function searches through the existing incomplete sequences that have
14127 * been sent to this @vport. If the frame matches one of the incomplete
14128 * sequences then the dbuf in the @dmabuf is added to the list of frames that
14129 * make up that sequence. If no sequence is found that matches this frame then
14130 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
14131 * This function returns a pointer to the first dmabuf in the sequence list that
14132 * the frame was linked to.
14133 **/
14134static struct hbq_dmabuf *
14135lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
14136{
14137 struct fc_frame_header *new_hdr;
14138 struct fc_frame_header *temp_hdr;
14139 struct lpfc_dmabuf *d_buf;
14140 struct lpfc_dmabuf *h_buf;
14141 struct hbq_dmabuf *seq_dmabuf = NULL;
14142 struct hbq_dmabuf *temp_dmabuf = NULL;
14143
4d9ab994 14144 INIT_LIST_HEAD(&dmabuf->dbuf.list);
45ed1190 14145 dmabuf->time_stamp = jiffies;
14146 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
14147 /* Use the hdr_buf to find the sequence that this frame belongs to */
14148 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
14149 temp_hdr = (struct fc_frame_header *)h_buf->virt;
14150 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
14151 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
14152 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
14153 continue;
14154 /* found a pending sequence that matches this frame */
14155 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
14156 break;
14157 }
14158 if (!seq_dmabuf) {
14159 /*
14160 * This indicates first frame received for this sequence.
14161 * Queue the buffer on the vport's rcv_buffer_list.
14162 */
14163 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
45ed1190 14164 lpfc_update_rcv_time_stamp(vport);
14165 return dmabuf;
14166 }
14167 temp_hdr = seq_dmabuf->hbuf.virt;
14168 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
14169 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
14170 list_del_init(&seq_dmabuf->hbuf.list);
14171 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
14172 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
45ed1190 14173 lpfc_update_rcv_time_stamp(vport);
14174 return dmabuf;
14175 }
14176 /* move this sequence to the tail to indicate a young sequence */
14177 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
14178 seq_dmabuf->time_stamp = jiffies;
14179 lpfc_update_rcv_time_stamp(vport);
14180 if (list_empty(&seq_dmabuf->dbuf.list)) {
14181 temp_hdr = dmabuf->hbuf.virt;
14182 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
14183 return seq_dmabuf;
14184 }
14185 /* find the correct place in the sequence to insert this frame */
14186 list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
14187 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
14188 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
14189 /*
14190 * If the frame's sequence count is greater than the frame on
14191 * the list then insert the frame right after this frame
14192 */
14193 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
14194 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
14195 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
14196 return seq_dmabuf;
14197 }
14198 }
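	/*
	 * No insertion point was found (e.g. the frame duplicates a
	 * sequence count already queued); returning NULL tells the caller
	 * to drop the frame.
	 */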
14199 return NULL;
14200}
14201
14202/**
14203 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
14204 * @vport: pointer to a virtual port
14205 * @dmabuf: pointer to a dmabuf that describes the FC sequence
14206 *
14207 * This function tries to abort the partially assembled sequence described
14208 * by the information from the basic abort @dmabuf. It checks whether such a
14209 * partially assembled sequence is held by the driver. If so, it shall free up all
14210 * the frames from the partially assembled sequence.
14211 *
14212 * Return
14213 * true -- if a matching partially assembled sequence is present and all
14214 * the frames were freed with the sequence;
14215 * false -- if there is no matching partially assembled sequence present, so
14216 * nothing got aborted in the lower layer driver
14217 **/
14218static bool
14219lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
14220 struct hbq_dmabuf *dmabuf)
14221{
14222 struct fc_frame_header *new_hdr;
14223 struct fc_frame_header *temp_hdr;
14224 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
14225 struct hbq_dmabuf *seq_dmabuf = NULL;
14226
14227 /* Use the hdr_buf to find the sequence that matches this frame */
14228 INIT_LIST_HEAD(&dmabuf->dbuf.list);
14229 INIT_LIST_HEAD(&dmabuf->hbuf.list);
14230 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
14231 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
14232 temp_hdr = (struct fc_frame_header *)h_buf->virt;
14233 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
14234 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
14235 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
14236 continue;
14237 /* found a pending sequence that matches this frame */
14238 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
14239 break;
14240 }
14241
14242 /* Free up all the frames from the partially assembled sequence */
14243 if (seq_dmabuf) {
14244 list_for_each_entry_safe(d_buf, n_buf,
14245 &seq_dmabuf->dbuf.list, list) {
14246 list_del_init(&d_buf->list);
14247 lpfc_in_buf_free(vport->phba, d_buf);
14248 }
14249 return true;
14250 }
14251 return false;
14252}
14253
14254/**
14255 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
14256 * @vport: pointer to a virtual port
14257 * @dmabuf: pointer to a dmabuf that describes the FC sequence
14258 *
14259 * This function tries to abort the assembled sequence at the upper level
14260 * protocol, described by the information from the basic abort @dmabuf. It
14261 * checks whether such a pending context exists at the upper level protocol.
14262 * If so, it shall clean up the pending context.
14263 *
14264 * Return
14265 * true -- if a matching pending context for the sequence was cleaned
14266 * up at the ulp;
14267 * false -- if no matching pending context for the sequence is present
14268 * at the ulp.
14269 **/
14270static bool
14271lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
14272{
14273 struct lpfc_hba *phba = vport->phba;
14274 int handled;
14275
14276 /* Accepting abort at ulp with SLI4 only */
14277 if (phba->sli_rev < LPFC_SLI_REV4)
14278 return false;
14279
14280 /* Register all caring upper level protocols to attend abort */
14281 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
14282 if (handled)
14283 return true;
14284
14285 return false;
14286}
14287
6669f9bb 14288/**
546fc854 14289 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
14290 * @phba: Pointer to HBA context object.
14291 * @cmd_iocbq: pointer to the command iocbq structure.
14292 * @rsp_iocbq: pointer to the response iocbq structure.
14293 *
546fc854 14294 * This function handles the sequence abort response iocb command complete
14295 * event. It properly releases the memory allocated to the sequence abort
14296 * response iocb.
14297 **/
14298static void
546fc854 14299lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
14300 struct lpfc_iocbq *cmd_iocbq,
14301 struct lpfc_iocbq *rsp_iocbq)
14302{
14303 struct lpfc_nodelist *ndlp;
14304
14305 if (cmd_iocbq) {
14306 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
14307 lpfc_nlp_put(ndlp);
14308 lpfc_nlp_not_used(ndlp);
6669f9bb 14309 lpfc_sli_release_iocbq(phba, cmd_iocbq);
6dd9e31c 14310 }
14311
14312 /* Failure means BLS ABORT RSP did not get delivered to remote node*/
14313 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
14314 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14315 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
14316 rsp_iocbq->iocb.ulpStatus,
14317 rsp_iocbq->iocb.un.ulpWord[4]);
14318}
14319
14320/**
14321 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
14322 * @phba: Pointer to HBA context object.
14323 * @xri: xri id in transaction.
14324 *
14325 * This function validates that the xri maps to the known range of XRIs allocated and
14326 * used by the driver.
14327 **/
7851fe2c 14328uint16_t
14329lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
14330 uint16_t xri)
14331{
14332 int i;
14333
14334 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
14335 if (xri == phba->sli4_hba.xri_ids[i])
14336 return i;
14337 }
14338 return NO_XRI;
14339}
14340
6669f9bb 14341/**
546fc854 14342 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
14343 * @phba: Pointer to HBA context object.
14344 * @fc_hdr: pointer to a FC frame header.
14345 *
546fc854 14346 * This function sends a basic response to a previous unsol sequence abort
14347 * event after aborting the sequence handling.
14348 **/
14349static void
14350lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
14351 struct fc_frame_header *fc_hdr, bool aborted)
6669f9bb 14352{
6dd9e31c 14353 struct lpfc_hba *phba = vport->phba;
14354 struct lpfc_iocbq *ctiocb = NULL;
14355 struct lpfc_nodelist *ndlp;
ee0f4fe1 14356 uint16_t oxid, rxid, xri, lxri;
5ffc266e 14357 uint32_t sid, fctl;
6669f9bb 14358 IOCB_t *icmd;
546fc854 14359 int rc;
14360
14361 if (!lpfc_is_link_up(phba))
14362 return;
14363
14364 sid = sli4_sid_from_fc_hdr(fc_hdr);
14365 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
5ffc266e 14366 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
6669f9bb 14367
6dd9e31c 14368 ndlp = lpfc_findnode_did(vport, sid);
6669f9bb 14369 if (!ndlp) {
14370 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
14371 if (!ndlp) {
14372 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
14373 "1268 Failed to allocate ndlp for "
14374 "oxid:x%x SID:x%x\n", oxid, sid);
14375 return;
14376 }
14377 lpfc_nlp_init(vport, ndlp, sid);
14378 /* Put ndlp onto pport node list */
14379 lpfc_enqueue_node(vport, ndlp);
14380 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
14381 /* re-setup ndlp without removing from node list */
14382 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
14383 if (!ndlp) {
14384 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
14385 "3275 Failed to active ndlp found "
14386 "for oxid:x%x SID:x%x\n", oxid, sid);
14387 return;
14388 }
14389 }
14390
546fc854 14391 /* Allocate buffer for rsp iocb */
14392 ctiocb = lpfc_sli_get_iocbq(phba);
14393 if (!ctiocb)
14394 return;
14395
14396 /* Extract the F_CTL field from FC_HDR */
14397 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
14398
6669f9bb 14399 icmd = &ctiocb->iocb;
6669f9bb 14400 icmd->un.xseq64.bdl.bdeSize = 0;
5ffc266e 14401 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
14402 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
14403 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
14404 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
14405
14406 /* Fill in the rest of iocb fields */
14407 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
14408 icmd->ulpBdeCount = 0;
14409 icmd->ulpLe = 1;
14410 icmd->ulpClass = CLASS3;
6d368e53 14411 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
6dd9e31c 14412 ctiocb->context1 = lpfc_nlp_get(ndlp);
6669f9bb 14413
14414 ctiocb->iocb_cmpl = NULL;
14415 ctiocb->vport = phba->pport;
546fc854 14416 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
6d368e53 14417 ctiocb->sli4_lxritag = NO_XRI;
14418 ctiocb->sli4_xritag = NO_XRI;
14419
14420 if (fctl & FC_FC_EX_CTX)
14421 /* Exchange responder sent the abort so we
14422 * own the oxid.
14423 */
14424 xri = oxid;
14425 else
14426 xri = rxid;
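	/*
	 * If the XRI is one the driver owns, mark it as an active RRQ
	 * (Reinstate Recovery Qualifier) so the exchange is not reused
	 * before the recovery window expires.
	 */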
14427 lxri = lpfc_sli4_xri_inrange(phba, xri);
14428 if (lxri != NO_XRI)
14429 lpfc_set_rrq_active(phba, ndlp, lxri,
14430 (xri == oxid) ? rxid : oxid, 0);
14431 /* For BA_ABTS from exchange responder, if the logical xri with
14432 * the oxid maps to the FCP XRI range, the port no longer has
14433 * that exchange context, send a BLS_RJT. Override the IOCB for
14434 * a BA_RJT.
14435 */
14436 if ((fctl & FC_FC_EX_CTX) &&
14437 (lxri > lpfc_sli4_get_els_iocb_cnt(phba))) {
14438 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
14439 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
14440 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
14441 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
14442 }
14443
14444 /* If BA_ABTS failed to abort a partially assembled receive sequence,
14445 * the driver no longer has that exchange, send a BLS_RJT. Override
14446 * the IOCB for a BA_RJT.
546fc854 14447 */
6dd9e31c 14448 if (aborted == false) {
14449 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
14450 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
14451 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
14452 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
14453 }
6669f9bb 14454
14455 if (fctl & FC_FC_EX_CTX) {
14456 /* ABTS sent by responder to CT exchange, construction
14457 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
14458 * field and RX_ID from ABTS for RX_ID field.
14459 */
546fc854 14460 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
14461 } else {
14462 /* ABTS sent by initiator to CT exchange, construction
14463 * of BA_ACC will need to allocate a new XRI as for the
f09c3acc 14464 * XRI_TAG field.
5ffc266e 14465 */
546fc854 14466 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
5ffc266e 14467 }
f09c3acc 14468 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
546fc854 14469 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
5ffc266e 14470
546fc854 14471 /* Xmit CT abts response on exchange <xid> */
14472 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
14473 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
14474 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
14475
14476 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
14477 if (rc == IOCB_ERROR) {
14478 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
14479 "2925 Failed to issue CT ABTS RSP x%x on "
14480 "xri x%x, Data x%x\n",
14481 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
14482 phba->link_state);
14483 lpfc_nlp_put(ndlp);
14484 ctiocb->context1 = NULL;
14485 lpfc_sli_release_iocbq(phba, ctiocb);
14486 }
14487}
14488
14489/**
14490 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
14491 * @vport: Pointer to the vport on which this sequence was received
14492 * @dmabuf: pointer to a dmabuf that describes the FC sequence
14493 *
14494 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
14495 * receive sequence is only partially assembled by the driver, it shall abort
14496 * the partially assembled frames for the sequence. Otherwise, if the
14497 * unsolicited receive sequence has been completely assembled and passed to
14498 * the Upper Layer Protocol (ULP), it then marks the per-oxid status to show
14499 * that the unsolicited sequence has been aborted. After that, it will issue a
14500 * basic accept or reject for the abort.
14501 **/
14502void
14503lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
14504 struct hbq_dmabuf *dmabuf)
14505{
14506 struct lpfc_hba *phba = vport->phba;
14507 struct fc_frame_header fc_hdr;
5ffc266e 14508 uint32_t fctl;
6dd9e31c 14509 bool aborted;
6669f9bb 14510
14511 /* Make a copy of fc_hdr before the dmabuf being released */
14512 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
5ffc266e 14513 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
6669f9bb 14514
5ffc266e 14515 if (fctl & FC_FC_EX_CTX) {
14516 /* ABTS by responder to exchange, no cleanup needed */
14517 aborted = true;
5ffc266e 14518 } else {
14519 /* ABTS by initiator to exchange, need to do cleanup */
14520 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
14521 if (aborted == false)
14522 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
5ffc266e 14523 }
14524 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14525
14526 /* Respond with BA_ACC or BA_RJT accordingly */
14527 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
14528}
14529
14530/**
14531 * lpfc_seq_complete - Indicates if a sequence is complete
14532 * @dmabuf: pointer to a dmabuf that describes the FC sequence
14533 *
14534 * This function checks the sequence, starting with the frame described by
14535 * @dmabuf, to see if all the frames associated with this sequence are present.
14536 * The frames associated with this sequence are linked to the @dmabuf using the
14537 * dbuf list. This function checks three major things: 1) that the first frame
14538 * has a sequence count of zero; 2) that there is a frame with the last frame
14539 * of sequence bit set; 3) that there are no holes in the sequence count. It will
14540 * return 1 when the sequence is complete, otherwise it will return 0.
14541 **/
14542static int
14543lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
14544{
14545 struct fc_frame_header *hdr;
14546 struct lpfc_dmabuf *d_buf;
14547 struct hbq_dmabuf *seq_dmabuf;
14548 uint32_t fctl;
14549 int seq_count = 0;
14550
14551 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
14552 /* make sure the first frame of the sequence has a sequence count of zero */
14553 if (hdr->fh_seq_cnt != seq_count)
14554 return 0;
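	/* F_CTL is a 24-bit field carried in the three fh_f_ctl bytes. */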
14555 fctl = (hdr->fh_f_ctl[0] << 16 |
14556 hdr->fh_f_ctl[1] << 8 |
14557 hdr->fh_f_ctl[2]);
14558 /* If last frame of sequence we can return success. */
14559 if (fctl & FC_FC_END_SEQ)
14560 return 1;
14561 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
14562 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
14563 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
14564 /* If there is a hole in the sequence count then fail. */
eeead811 14565 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
14566 return 0;
14567 fctl = (hdr->fh_f_ctl[0] << 16 |
14568 hdr->fh_f_ctl[1] << 8 |
14569 hdr->fh_f_ctl[2]);
14570 /* If last frame of sequence we can return success. */
14571 if (fctl & FC_FC_END_SEQ)
14572 return 1;
14573 }
14574 return 0;
14575}
14576
14577/**
14578 * lpfc_prep_seq - Prep sequence for ULP processing
14579 * @vport: Pointer to the vport on which this sequence was received
14580 * @dmabuf: pointer to a dmabuf that describes the FC sequence
14581 *
14582 * This function takes a sequence, described by a list of frames, and creates
14583 * a list of iocbq structures to describe the sequence. This iocbq list will be
14584 * used to issue to the generic unsolicited sequence handler. This routine
14585 * returns a pointer to the first iocbq in the list. If the function is unable
14586 * to allocate an iocbq then it throws out the received frames that were not
14587 * able to be described and returns a pointer to the first iocbq. If unable to
14588 * allocate any iocbqs (including the first) this function will return NULL.
14589 **/
14590static struct lpfc_iocbq *
14591lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
14592{
7851fe2c 14593 struct hbq_dmabuf *hbq_buf;
14594 struct lpfc_dmabuf *d_buf, *n_buf;
14595 struct lpfc_iocbq *first_iocbq, *iocbq;
14596 struct fc_frame_header *fc_hdr;
14597 uint32_t sid;
7851fe2c 14598 uint32_t len, tot_len;
eeead811 14599 struct ulp_bde64 *pbde;
14600
14601 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
14602 /* remove from receive buffer list */
14603 list_del_init(&seq_dmabuf->hbuf.list);
45ed1190 14604 lpfc_update_rcv_time_stamp(vport);
4f774513 14605 /* get the Remote Port's SID */
6669f9bb 14606 sid = sli4_sid_from_fc_hdr(fc_hdr);
7851fe2c 14607 tot_len = 0;
14608 /* Get an iocbq struct to fill in. */
14609 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
14610 if (first_iocbq) {
14611 /* Initialize the first IOCB. */
8fa38513 14612 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
4f774513 14613 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
14614
14615 /* Check FC Header to see what TYPE of frame we are rcv'ing */
14616 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
14617 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
14618 first_iocbq->iocb.un.rcvels.parmRo =
14619 sli4_did_from_fc_hdr(fc_hdr);
14620 first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
14621 } else
14622 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
14623 first_iocbq->iocb.ulpContext = NO_XRI;
14624 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
14625 be16_to_cpu(fc_hdr->fh_ox_id);
14626 /* iocbq is prepped for internal consumption. Physical vpi. */
14627 first_iocbq->iocb.unsli3.rcvsli3.vpi =
14628 vport->phba->vpi_ids[vport->vpi];
14629 /* put the first buffer into the first IOCBq */
14630 first_iocbq->context2 = &seq_dmabuf->dbuf;
14631 first_iocbq->context3 = NULL;
14632 first_iocbq->iocb.ulpBdeCount = 1;
14633 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
14634 LPFC_DATA_BUF_SIZE;
14635 first_iocbq->iocb.un.rcvels.remoteID = sid;
7851fe2c 14636 tot_len = bf_get(lpfc_rcqe_length,
4d9ab994 14637 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
7851fe2c 14638 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
14639 }
14640 iocbq = first_iocbq;
14641 /*
14642 * Each IOCBq can have two Buffers assigned, so go through the list
14643 * of buffers for this sequence and save two buffers in each IOCBq
14644 */
14645 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
14646 if (!iocbq) {
14647 lpfc_in_buf_free(vport->phba, d_buf);
14648 continue;
14649 }
14650 if (!iocbq->context3) {
14651 iocbq->context3 = d_buf;
14652 iocbq->iocb.ulpBdeCount++;
14653 pbde = (struct ulp_bde64 *)
14654 &iocbq->iocb.unsli3.sli3Words[4];
14655 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
14656
14657 /* We need to get the size out of the right CQE */
14658 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
14659 len = bf_get(lpfc_rcqe_length,
14660 &hbq_buf->cq_event.cqe.rcqe_cmpl);
14661 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
14662 tot_len += len;
14663 } else {
14664 iocbq = lpfc_sli_get_iocbq(vport->phba);
14665 if (!iocbq) {
14666 if (first_iocbq) {
14667 first_iocbq->iocb.ulpStatus =
14668 IOSTAT_FCP_RSP_ERROR;
14669 first_iocbq->iocb.un.ulpWord[4] =
14670 IOERR_NO_RESOURCES;
14671 }
14672 lpfc_in_buf_free(vport->phba, d_buf);
14673 continue;
14674 }
14675 iocbq->context2 = d_buf;
14676 iocbq->context3 = NULL;
14677 iocbq->iocb.ulpBdeCount = 1;
14678 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
14679 LPFC_DATA_BUF_SIZE;
14680
14681 /* We need to get the size out of the right CQE */
14682 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
14683 len = bf_get(lpfc_rcqe_length,
14684 &hbq_buf->cq_event.cqe.rcqe_cmpl);
14685 tot_len += len;
14686 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
14687
14688 iocbq->iocb.un.rcvels.remoteID = sid;
14689 list_add_tail(&iocbq->list, &first_iocbq->list);
14690 }
14691 }
14692 return first_iocbq;
14693}
14694
14695static void
14696lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
14697 struct hbq_dmabuf *seq_dmabuf)
14698{
14699 struct fc_frame_header *fc_hdr;
14700 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
14701 struct lpfc_hba *phba = vport->phba;
14702
14703 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
14704 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
14705 if (!iocbq) {
14706 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14707 "2707 Ring %d handler: Failed to allocate "
14708 "iocb Rctl x%x Type x%x received\n",
14709 LPFC_ELS_RING,
14710 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
14711 return;
14712 }
14713 if (!lpfc_complete_unsol_iocb(phba,
14714 &phba->sli.ring[LPFC_ELS_RING],
14715 iocbq, fc_hdr->fh_r_ctl,
14716 fc_hdr->fh_type))
6d368e53 14717 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14718 "2540 Ring %d handler: unexpected Rctl "
14719 "x%x Type x%x received\n",
14720 LPFC_ELS_RING,
14721 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
14722
14723 /* Free iocb created in lpfc_prep_seq */
14724 list_for_each_entry_safe(curr_iocb, next_iocb,
14725 &iocbq->list, list) {
14726 list_del_init(&curr_iocb->list);
14727 lpfc_sli_release_iocbq(phba, curr_iocb);
14728 }
14729 lpfc_sli_release_iocbq(phba, iocbq);
14730}
14731
14732/**
14733 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
14734 * @phba: Pointer to HBA context object.
14735 *
14736 * This function is called with no lock held. This function processes all
14737 * the received buffers and gives it to upper layers when a received buffer
14738 * indicates that it is the final frame in the sequence. The interrupt
14739 * service routine processes received buffers at interrupt contexts and adds
14740 * received dma buffers to the rb_pend_list queue and signals the worker thread.
14741 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
14742 * appropriate receive function when the final frame in a sequence is received.
14743 **/
14744void
14745lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
14746 struct hbq_dmabuf *dmabuf)
4f774513 14747{
4d9ab994 14748 struct hbq_dmabuf *seq_dmabuf;
14749 struct fc_frame_header *fc_hdr;
14750 struct lpfc_vport *vport;
14751 uint32_t fcfi;
939723a4 14752 uint32_t did;
4f774513 14753
4f774513 14754 /* Process each received buffer */
14755 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
14756 /* check to see if this is a valid type of frame */
14757 if (lpfc_fc_frame_check(phba, fc_hdr)) {
14758 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14759 return;
14760 }
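	/*
	 * V0 and V1 receive CQEs carry the FCF index in different fields,
	 * so pick the accessor based on the CQE code.
	 */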
14761 if ((bf_get(lpfc_cqe_code,
14762 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
14763 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
14764 &dmabuf->cq_event.cqe.rcqe_cmpl);
14765 else
14766 fcfi = bf_get(lpfc_rcqe_fcf_id,
14767 &dmabuf->cq_event.cqe.rcqe_cmpl);
939723a4 14768
4d9ab994 14769 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
939723a4 14770 if (!vport) {
14771 /* throw out the frame */
14772 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14773 return;
14774 }
14775
14776 /* d_id this frame is directed to */
14777 did = sli4_did_from_fc_hdr(fc_hdr);
14778
14779 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
14780 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
14781 (did != Fabric_DID)) {
14782 /*
14783 * Throw out the frame if we are not pt2pt.
14784 * The pt2pt protocol allows for discovery frames
14785 * to be received without a registered VPI.
14786 */
14787 if (!(vport->fc_flag & FC_PT2PT) ||
14788 (phba->link_state == LPFC_HBA_READY)) {
14789 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14790 return;
14791 }
14792 }
14793
14794 /* Handle the basic abort sequence (BA_ABTS) event */
14795 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
14796 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
14797 return;
14798 }
14799
14800 /* Link this frame */
14801 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
14802 if (!seq_dmabuf) {
14803 /* unable to add frame to vport - throw it out */
14804 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14805 return;
14806 }
14807 /* If not last frame in sequence continue processing frames. */
def9c7a9 14808 if (!lpfc_seq_complete(seq_dmabuf))
4d9ab994 14809 return;
def9c7a9 14810
14811 /* Send the complete sequence to the upper layer protocol */
14812 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
4f774513 14813}
14814
14815/**
14816 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
14817 * @phba: pointer to lpfc hba data structure.
14818 *
14819 * This routine is invoked to post rpi header templates to the
14820 * HBA consistent with the SLI-4 interface spec. This routine
14821 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
14822 * SLI4_PAGE_SIZE / 64 rpi context headers.
14823 *
14824 * This routine does not require any locks. Its usage is expected
14825 * to be driver load or reset recovery, when driver execution is
14826 * sequential.
14827 *
14828 * Return codes
af901ca1 14829 * 0 - successful
d439d286 14830 * -EIO - The mailbox failed to complete successfully.
14831 * When this error occurs, the driver is not guaranteed
14832 * to have any rpi regions posted to the device and
14833 * must either attempt to repost the regions or take a
14834 * fatal error.
14835 **/
14836int
14837lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
14838{
14839 struct lpfc_rpi_hdr *rpi_page;
14840 uint32_t rc = 0;
14841 uint16_t lrpi = 0;
14842
14843 /* SLI4 ports that support extents do not require RPI headers. */
14844 if (!phba->sli4_hba.rpi_hdrs_in_use)
14845 goto exit;
14846 if (phba->sli4_hba.extents_in_use)
14847 return -EIO;
6fb120a7 14848
6fb120a7 14849 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
14850 /*
14851 * Assign the rpi headers a physical rpi only if the driver
14852 * has not initialized those resources. A port reset only
14853 * needs the headers posted.
14854 */
14855 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
14856 LPFC_RPI_RSRC_RDY)
14857 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
14858
14859 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
14860 if (rc != MBX_SUCCESS) {
14861 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14862 "2008 Error %d posting all rpi "
14863 "headers\n", rc);
14864 rc = -EIO;
14865 break;
14866 }
14867 }
14868
14869 exit:
14870 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
14871 LPFC_RPI_RSRC_RDY);
14872 return rc;
14873}
14874
14875/**
14876 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
14877 * @phba: pointer to lpfc hba data structure.
14878 * @rpi_page: pointer to the rpi memory region.
14879 *
14880 * This routine is invoked to post a single rpi header to the
14881 * HBA consistent with the SLI-4 interface spec. This memory region
14882 * maps up to 64 rpi context regions.
14883 *
14884 * Return codes
af901ca1 14885 * 0 - successful
14886 * -ENOMEM - No available memory
14887 * -EIO - The mailbox failed to complete successfully.
14888 **/
14889int
14890lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
14891{
14892 LPFC_MBOXQ_t *mboxq;
14893 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
14894 uint32_t rc = 0;
14895 uint32_t shdr_status, shdr_add_status;
14896 union lpfc_sli4_cfg_shdr *shdr;
14897
14898 /* SLI4 ports that support extents do not require RPI headers. */
14899 if (!phba->sli4_hba.rpi_hdrs_in_use)
14900 return rc;
14901 if (phba->sli4_hba.extents_in_use)
14902 return -EIO;
14903
14904 /* The port is notified of the header region via a mailbox command. */
14905 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14906 if (!mboxq) {
14907 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14908 "2001 Unable to allocate memory for issuing "
14909 "SLI_CONFIG_SPECIAL mailbox command\n");
14910 return -ENOMEM;
14911 }
14912
14913 /* Post all rpi memory regions to the port. */
14914 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
14915 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
14916 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
14917 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
14918 sizeof(struct lpfc_sli4_cfg_mhdr),
14919 LPFC_SLI4_MBX_EMBED);
14920
14921
14922 /* Post the physical rpi to the port for this rpi header. */
14923 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
14924 rpi_page->start_rpi);
14925 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
14926 hdr_tmpl, rpi_page->page_count);
14927
14928 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
14929 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
f1126688 14930 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
14931 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
14932 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14933 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14934 if (rc != MBX_TIMEOUT)
14935 mempool_free(mboxq, phba->mbox_mem_pool);
14936 if (shdr_status || shdr_add_status || rc) {
14937 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14938 "2514 POST_RPI_HDR mailbox failed with "
14939 "status x%x add_status x%x, mbx status x%x\n",
14940 shdr_status, shdr_add_status, rc);
14941 rc = -ENXIO;
14942 }
14943 return rc;
14944}
14945
14946/**
14947 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
14948 * @phba: pointer to lpfc hba data structure.
14949 *
14950 * This routine is invoked to allocate the next available rpi from the
14951 * driver's rpi bitmask. If available rpi resources run low, it also
14952 * posts another SLI4_PAGE_SIZE memory region (holding up to
14953 * SLI4_PAGE_SIZE / 64 rpi context headers) to the port.
14954 *
14955 * Returns
af901ca1 14956 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
14957 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
14958 **/
14959int
14960lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
14961{
14962 unsigned long rpi;
14963 uint16_t max_rpi, rpi_limit;
14964 uint16_t rpi_remaining, lrpi = 0;
14965 struct lpfc_rpi_hdr *rpi_hdr;
14966
14967 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
14968 rpi_limit = phba->sli4_hba.next_rpi;
14969
14970 /*
14971 * Fetch the next logical rpi. Because this index is logical,
14972 * the driver starts at 0 each time.
14973 */
14974 spin_lock_irq(&phba->hbalock);
14975 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
14976 if (rpi >= rpi_limit)
14977 rpi = LPFC_RPI_ALLOC_ERROR;
14978 else {
14979 set_bit(rpi, phba->sli4_hba.rpi_bmask);
14980 phba->sli4_hba.max_cfg_param.rpi_used++;
14981 phba->sli4_hba.rpi_count++;
14982 }
14983
14984 /*
14985 * Don't try to allocate more rpi header regions if the device limit
6d368e53 14986 * has been exhausted.
14987 */
14988 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
14989 (phba->sli4_hba.rpi_count >= max_rpi)) {
14990 spin_unlock_irq(&phba->hbalock);
14991 return rpi;
14992 }
14993
14994 /*
14995 * RPI header postings are not required for SLI4 ports capable of
14996 * extents.
14997 */
14998 if (!phba->sli4_hba.rpi_hdrs_in_use) {
14999 spin_unlock_irq(&phba->hbalock);
15000 return rpi;
15001 }
15002
15003 /*
15004 * If the driver is running low on rpi resources, allocate another
15005 * page now. Note that the next_rpi value is used because
15006 * it represents how many are actually in use whereas max_rpi notes
15007 * how many are supported max by the device.
15008 */
6d368e53 15009 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
15010 spin_unlock_irq(&phba->hbalock);
15011 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
15012 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
15013 if (!rpi_hdr) {
15014 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15015 "2002 Error Could not grow rpi "
15016 "count\n");
15017 } else {
15018 lrpi = rpi_hdr->start_rpi;
15019 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
15020 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
15021 }
15022 }
15023
15024 return rpi;
15025}
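
/*
 * Minimal usage sketch (illustrative only, not lifted from a specific
 * caller): an rpi is allocated before registering a remote port login
 * and released again when that login is torn down:
 *
 *	rpi = lpfc_sli4_alloc_rpi(phba);
 *	if (rpi == LPFC_RPI_ALLOC_ERROR)
 *		return -ENOMEM;
 *	...
 *	lpfc_sli4_free_rpi(phba, rpi);
 */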
15026
15027/**
15028 * lpfc_sli4_free_rpi - Release an rpi for reuse.
15029 * @phba: pointer to lpfc hba data structure.
15030 *
15031 * This routine is invoked to release an rpi to the pool of
15032 * available rpis maintained by the driver.
15033 **/
15034void
15035__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
15036{
15037 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
15038 phba->sli4_hba.rpi_count--;
15039 phba->sli4_hba.max_cfg_param.rpi_used--;
15040 }
15041}
15042
15043/**
15044 * lpfc_sli4_free_rpi - Release an rpi for reuse.
15045 * @phba: pointer to lpfc hba data structure.
15046 *
15047 * This routine is invoked to release an rpi to the pool of
15048 * available rpis maintained by the driver.
15049 **/
15050void
15051lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
15052{
15053 spin_lock_irq(&phba->hbalock);
d7c47992 15054 __lpfc_sli4_free_rpi(phba, rpi);
15055 spin_unlock_irq(&phba->hbalock);
15056}
15057
15058/**
15059 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
15060 * @phba: pointer to lpfc hba data structure.
15061 *
15062 * This routine is invoked to remove the memory region that
15063 * provided rpi via a bitmask.
15064 **/
15065void
15066lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
15067{
15068 kfree(phba->sli4_hba.rpi_bmask);
15069 kfree(phba->sli4_hba.rpi_ids);
15070 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
15071}
15072
15073/**
15074 * lpfc_sli4_resume_rpi - Resume an rpi on the port
15075 * @ndlp: pointer to the node whose rpi is to be resumed.
15076 *
15077 * This routine is invoked to issue a RESUME_RPI mailbox command for
15078 * the rpi associated with @ndlp.
15079 **/
15080int
15081lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
15082 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
15083{
15084 LPFC_MBOXQ_t *mboxq;
15085 struct lpfc_hba *phba = ndlp->phba;
15086 int rc;
15087
15088 /* The port is notified of the header region via a mailbox command. */
15089 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15090 if (!mboxq)
15091 return -ENOMEM;
15092
15093 /* Post all rpi memory regions to the port. */
15094 lpfc_resume_rpi(mboxq, ndlp);
15095 if (cmpl) {
15096 mboxq->mbox_cmpl = cmpl;
15097 mboxq->context1 = arg;
15098 mboxq->context2 = ndlp;
15099 } else
15100 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
6b5151fd 15101 mboxq->vport = ndlp->vport;
15102 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
15103 if (rc == MBX_NOT_FINISHED) {
15104 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15105 "2010 Resume RPI Mailbox failed "
15106 "status %d, mbxStatus x%x\n", rc,
15107 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
15108 mempool_free(mboxq, phba->mbox_mem_pool);
15109 return -EIO;
15110 }
15111 return 0;
15112}
15113
15114/**
15115 * lpfc_sli4_init_vpi - Initialize a vpi with the port
76a95d75 15116 * @vport: Pointer to the vport for which the vpi is being initialized
6fb120a7 15117 *
76a95d75 15118 * This routine is invoked to activate a vpi with the port.
15119 *
15120 * Returns:
15121 * 0 success
15122 * -Evalue otherwise
15123 **/
15124int
76a95d75 15125lpfc_sli4_init_vpi(struct lpfc_vport *vport)
15126{
15127 LPFC_MBOXQ_t *mboxq;
15128 int rc = 0;
6a9c52cf 15129 int retval = MBX_SUCCESS;
6fb120a7 15130 uint32_t mbox_tmo;
76a95d75 15131 struct lpfc_hba *phba = vport->phba;
15132 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15133 if (!mboxq)
15134 return -ENOMEM;
76a95d75 15135 lpfc_init_vpi(phba, mboxq, vport->vpi);
a183a15f 15136 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
6fb120a7 15137 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
6fb120a7 15138 if (rc != MBX_SUCCESS) {
76a95d75 15139 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
15140 "2022 INIT VPI Mailbox failed "
15141 "status %d, mbxStatus x%x\n", rc,
15142 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
6a9c52cf 15143 retval = -EIO;
6fb120a7 15144 }
6a9c52cf 15145 if (rc != MBX_TIMEOUT)
76a95d75 15146 mempool_free(mboxq, vport->phba->mbox_mem_pool);
15147
15148 return retval;
15149}
15150
15151/**
15152 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
15153 * @phba: pointer to lpfc hba data structure.
15154 * @mboxq: Pointer to mailbox object.
15155 *
15156 * This routine is invoked to manually add a single FCF record. The caller
15157 * must pass a completely initialized FCF_Record. This routine takes
15158 * care of the nonembedded mailbox operations.
15159 **/
15160static void
15161lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
15162{
15163 void *virt_addr;
15164 union lpfc_sli4_cfg_shdr *shdr;
15165 uint32_t shdr_status, shdr_add_status;
15166
15167 virt_addr = mboxq->sge_array->addr[0];
15168 /* The IOCTL status is embedded in the mailbox subheader. */
15169 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
15170 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15171 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15172
15173 if ((shdr_status || shdr_add_status) &&
15174 (shdr_status != STATUS_FCF_IN_USE))
15175 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15176 "2558 ADD_FCF_RECORD mailbox failed with "
15177 "status x%x add_status x%x\n",
15178 shdr_status, shdr_add_status);
15179
15180 lpfc_sli4_mbox_cmd_free(phba, mboxq);
15181}
15182
15183/**
15184 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
15185 * @phba: pointer to lpfc hba data structure.
15186 * @fcf_record: pointer to the initialized fcf record to add.
15187 *
15188 * This routine is invoked to manually add a single FCF record. The caller
15189 * must pass a completely initialized FCF_Record. This routine takes
15190 * care of the nonembedded mailbox operations.
15191 **/
15192int
15193lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
15194{
15195 int rc = 0;
15196 LPFC_MBOXQ_t *mboxq;
15197 uint8_t *bytep;
15198 void *virt_addr;
15199 dma_addr_t phys_addr;
15200 struct lpfc_mbx_sge sge;
15201 uint32_t alloc_len, req_len;
15202 uint32_t fcfindex;
15203
15204 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15205 if (!mboxq) {
15206 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15207 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
15208 return -ENOMEM;
15209 }
15210
15211 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
15212 sizeof(uint32_t);
15213
15214 /* Allocate DMA memory and set up the non-embedded mailbox command */
15215 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
15216 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
15217 req_len, LPFC_SLI4_MBX_NEMBED);
15218 if (alloc_len < req_len) {
15219 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15220 "2523 Allocated DMA memory size (x%x) is "
15221 "less than the requested DMA memory "
15222 "size (x%x)\n", alloc_len, req_len);
15223 lpfc_sli4_mbox_cmd_free(phba, mboxq);
15224 return -ENOMEM;
15225 }
15226
15227 /*
15228 * Get the first SGE entry from the non-embedded DMA memory. This
15229 * routine only uses a single SGE.
15230 */
15231 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
15232 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
15233 virt_addr = mboxq->sge_array->addr[0];
15234 /*
15235 * Configure the FCF record for FCFI 0. This is the driver's
15236 * hardcoded default and gets used in nonFIP mode.
15237 */
15238 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
15239 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
15240 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
15241
15242 /*
15243 * Copy the fcf_index and the FCF Record Data. The data starts after
15244 * the FCoE header plus word10. The data copy needs to be endian
15245 * correct.
15246 */
15247 bytep += sizeof(uint32_t);
15248 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
15249 mboxq->vport = phba->pport;
15250 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
15251 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
15252 if (rc == MBX_NOT_FINISHED) {
15253 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15254 "2515 ADD_FCF_RECORD mailbox failed with "
15255 "status 0x%x\n", rc);
15256 lpfc_sli4_mbox_cmd_free(phba, mboxq);
15257 rc = -EIO;
15258 } else
15259 rc = 0;
15260
15261 return rc;
15262}

/**
 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the fcf record to write the default data.
 * @fcf_index: FCF table entry index.
 *
 * This routine is invoked to build the driver's default FCF record. The
 * values used are hardcoded. This routine handles memory initialization.
 *
 **/
void
lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
				struct fcf_record *fcf_record,
				uint16_t fcf_index)
{
	memset(fcf_record, 0, sizeof(struct fcf_record));
	fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
	fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
	fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
	bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
	bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
	bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
	bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
	bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
	bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
	bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
	       LPFC_FCF_FPMA | LPFC_FCF_SPMA);
	/* Set the VLAN bit map */
	if (phba->valid_vlan) {
		fcf_record->vlan_bitmap[phba->vlan_id / 8]
			= 1 << (phba->vlan_id % 8);
	}
}
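
/*
 * Usage sketch (illustrative only, not part of this file): a non-FIP
 * caller would typically build the default record and feed it to the
 * ADD_FCF mailbox path above, e.g.
 *
 *	struct fcf_record fcf_record;
 *
 *	lpfc_sli4_build_dflt_fcf_record(phba, &fcf_record,
 *					LPFC_FCOE_FCF_DEF_INDEX);
 *	rc = lpfc_sli4_add_fcf_record(phba, &fcf_record);
 *
 * lpfc_sli4_add_fcf_record() and LPFC_FCOE_FCF_DEF_INDEX are assumed
 * here from the ADD_FCF completion wiring above.
 */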

/**
 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to scan the entire FCF table by reading FCF
 * record and processing it one at a time starting from the @fcf_index
 * for initial FCF discovery or fast FCF failover rediscovery.
 *
 * Return 0 if the mailbox command is submitted successfully, non-zero
 * otherwise.
 **/
int
lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
	phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2000 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_scan;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_scan;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag |= FCF_TS_INPROG;
	spin_unlock_irq(&phba->hbalock);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else {
		/* Reset eligible FCF count for new scan */
		if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
			phba->fcf.eligible_fcf_cnt = 0;
		error = 0;
	}
fail_fcf_scan:
	if (error) {
		if (mboxq)
			lpfc_sli4_mbox_cmd_free(phba, mboxq);
		/* FCF scan failed, clear FCF_TS_INPROG flag */
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~FCF_TS_INPROG;
		spin_unlock_irq(&phba->hbalock);
	}
	return error;
}
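
/*
 * Usage sketch (illustrative only): an initial table scan is kicked off
 * from the first entry and then continued record by record from the
 * completion handler, e.g.
 *
 *	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
 *	if (rc)
 *		... scan not started; FCF_TS_INPROG already cleared ...
 */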

/**
 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to read an FCF record indicated by @fcf_index
 * and to use it for FLOGI roundrobin FCF failover.
 *
 * Return 0 if the mailbox command is submitted successfully, non-zero
 * otherwise.
 **/
int
lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
				"2763 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_read;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_read;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else
		error = 0;

fail_fcf_read:
	if (error && mboxq)
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
	return error;
}

/**
 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to read an FCF record indicated by @fcf_index to
 * determine whether it is eligible for the FLOGI roundrobin failover list.
 *
 * Return 0 if the mailbox command is submitted successfully, non-zero
 * otherwise.
 **/
int
lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
				"2758 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_read;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_read;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else
		error = 0;

fail_fcf_read:
	if (error && mboxq)
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
	return error;
}
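
/*
 * Editorial note: the three READ_FCF helpers above build the identical
 * mailbox via lpfc_sli4_mbx_read_fcf_rec() and differ only in the
 * completion handler wired to it:
 *
 *	lpfc_sli4_fcf_scan_read_fcf_rec() -> lpfc_mbx_cmpl_fcf_scan_read_fcf_rec
 *	lpfc_sli4_fcf_rr_read_fcf_rec()   -> lpfc_mbx_cmpl_fcf_rr_read_fcf_rec
 *	lpfc_sli4_read_fcf_rec()          -> lpfc_mbx_cmpl_read_fcf_rec
 *
 * so the handler, not the command, decides whether the record drives a
 * scan, a roundrobin failover, or a bmask eligibility update.
 */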

/**
 * lpfc_check_next_fcf_pri_level - Repopulate rr_bmask from the next priority.
 * @phba: pointer to the lpfc_hba struct for this port.
 *
 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
 * routine when the rr_bmask is empty. The FCF indices are put into the
 * rr_bmask based on their priority level, starting from the highest
 * priority to the lowest. The most likely FCF candidate will be in the
 * highest priority group. When this routine is called it searches the
 * fcf_pri list for the next lowest priority group and repopulates the
 * rr_bmask with only those fcf_indexes.
 * returns:
 * 1=success 0=failure
 **/
int
lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
{
	uint16_t next_fcf_pri;
	uint16_t last_index;
	struct lpfc_fcf_pri *fcf_pri;
	int rc;
	int ret = 0;

	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
				    LPFC_SLI4_FCF_TBL_INDX_MAX);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"3060 Last IDX %d\n", last_index);
	if (list_empty(&phba->fcf.fcf_pri_list)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"3061 Last IDX %d\n", last_index);
		return 0; /* Empty rr list */
	}
	next_fcf_pri = 0;
	/*
	 * Clear the rr_bmask and set all of the bits that are at this
	 * priority.
	 */
	memset(phba->fcf.fcf_rr_bmask, 0,
	       sizeof(*phba->fcf.fcf_rr_bmask));
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
		if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
			continue;
		/*
		 * The first priority that has not failed FLOGI
		 * will be the highest.
		 */
		if (!next_fcf_pri)
			next_fcf_pri = fcf_pri->fcf_rec.priority;
		spin_unlock_irq(&phba->hbalock);
		if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
			rc = lpfc_sli4_fcf_rr_index_set(phba,
						fcf_pri->fcf_rec.fcf_index);
			if (rc)
				return 0;
		}
		spin_lock_irq(&phba->hbalock);
	}
	/*
	 * If next_fcf_pri was not set above and the list is not empty, then
	 * FLOGI has failed on all of them. So reset the FLOGI-failed flags
	 * and start at the beginning.
	 */
	if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
		list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
			fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
			/*
			 * The first priority that has not failed FLOGI
			 * will be the highest.
			 */
			if (!next_fcf_pri)
				next_fcf_pri = fcf_pri->fcf_rec.priority;
			spin_unlock_irq(&phba->hbalock);
			if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
				rc = lpfc_sli4_fcf_rr_index_set(phba,
						fcf_pri->fcf_rec.fcf_index);
				if (rc)
					return 0;
			}
			spin_lock_irq(&phba->hbalock);
		}
	} else
		ret = 1;
	spin_unlock_irq(&phba->hbalock);

	return ret;
}
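
/*
 * Worked example (illustrative): with three FCFs recorded as
 * {index 0: priority 1, index 5: priority 1, index 9: priority 2},
 * the rr_bmask initially holds bits 0 and 5. Once FLOGI has failed
 * against both, this routine repopulates the bmask with bit 9 (the
 * next lower priority group); if every entry has failed, the
 * LPFC_FCF_FLOGI_FAILED flags are reset and selection restarts from
 * the highest priority group.
 */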
/**
 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to get the next eligible FCF record index in a round
 * robin fashion. If the next eligible FCF record index equals the
 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
 * shall be returned, otherwise, the next eligible FCF record's index
 * shall be returned.
 **/
uint16_t
lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
{
	uint16_t next_fcf_index;

initial_priority:
	/* Search starts from the next bit of the currently registered
	 * FCF index.
	 */
	next_fcf_index = phba->fcf.current_rec.fcf_indx;

next_priority:
	/* Determine the next fcf index to check */
	next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
	next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
				       LPFC_SLI4_FCF_TBL_INDX_MAX,
				       next_fcf_index);

	/* Wrap around condition on phba->fcf.fcf_rr_bmask */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		/*
		 * If we have wrapped then we need to clear the bits that
		 * have been tested so that we can detect when we should
		 * change the priority level.
		 */
		next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
					       LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
	}

	/* Check roundrobin failover list empty condition */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
	    next_fcf_index == phba->fcf.current_rec.fcf_indx) {
		/*
		 * If the next fcf index is not found, check if there are
		 * lower priority level fcf's in the fcf_priority list.
		 * Set up the rr_bmask with all of the available fcf bits
		 * at that level and continue the selection process.
		 */
		if (lpfc_check_next_fcf_pri_level(phba))
			goto initial_priority;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2844 No roundrobin failover FCF available\n");
		if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
			return LPFC_FCOE_FCF_NEXT_NONE;
		else {
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"3063 Only FCF available idx %d, flag %x\n",
				next_fcf_index,
				phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
			return next_fcf_index;
		}
	}

	if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
	    phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
	    LPFC_FCF_FLOGI_FAILED)
		goto next_priority;

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2845 Get next roundrobin failover FCF (x%x)\n",
			next_fcf_index);

	return next_fcf_index;
}
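
/*
 * Usage sketch (illustrative only, simplified from the FLOGI failover
 * path): the roundrobin machinery above is typically driven as
 *
 *	fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
 *	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE)
 *		... no eligible FCF left, abandon roundrobin failover ...
 *	else
 *		rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
 *
 * with lpfc_sli4_fcf_rr_index_clear() called against an index whose
 * FLOGI attempt fails, so it drops out of the eligible bmask.
 */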

/**
 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the FCF record index to set.
 *
 * This routine sets the FCF record index in to the eligible bmask for
 * roundrobin failover search. It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before setting the bit.
 *
 * Returns 0 if the index bit is successfully set, otherwise, it returns
 * -EINVAL.
 **/
int
lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
{
	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2610 FCF (x%x) reached driver's book "
				"keeping dimension:x%x\n",
				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
		return -EINVAL;
	}
	/* Set the eligible FCF record index bmask */
	set_bit(fcf_index, phba->fcf.fcf_rr_bmask);

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2790 Set FCF (x%x) to roundrobin FCF failover "
			"bmask\n", fcf_index);

	return 0;
}

/**
 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the FCF record index to clear.
 *
 * This routine clears the FCF record index from the eligible bmask for
 * roundrobin failover search. It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before clearing the bit.
 **/
void
lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
{
	struct lpfc_fcf_pri *fcf_pri;
	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2762 FCF (x%x) reached driver's book "
				"keeping dimension:x%x\n",
				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
		return;
	}
	/* Clear the eligible FCF record index bmask */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
		if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
			list_del_init(&fcf_pri->list);
			break;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2791 Clear FCF (x%x) from roundrobin failover "
			"bmask\n", fcf_index);
}

/**
 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to the rediscover FCF table mailbox command.
 *
 * This routine is the completion routine for the rediscover FCF table mailbox
 * command. If the mailbox command returned failure, it will try to stop the
 * FCF rediscover wait timer.
 **/
void
lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
	uint32_t shdr_status, shdr_add_status;

	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;

	shdr_status = bf_get(lpfc_mbox_hdr_status,
			     &redisc_fcf->header.cfg_shdr.response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
			     &redisc_fcf->header.cfg_shdr.response);
	if (shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2746 Requesting for FCF rediscovery failed "
				"status x%x add_status x%x\n",
				shdr_status, shdr_add_status);
		if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * CVL event triggered FCF rediscover request failed,
			 * last resort to re-try current registered FCF entry.
			 */
			lpfc_retry_pport_discovery(phba);
		} else {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * DEAD FCF event triggered FCF rediscover request
			 * failed, last resort to fail over as a link down
			 * to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		}
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2775 Start FCF rediscover quiescent timer\n");
		/*
		 * Start the FCF rediscovery wait timer for pending FCF
		 * before rescanning the FCF record table.
		 */
		lpfc_fcf_redisc_wait_start_timer(phba);
	}

	mempool_free(mbox, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request for rediscovery of the entire FCF table
 * by the port.
 **/
int
lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
	int rc, length;

	/* Cancel retry delay timers to all vports before FCF rediscover */
	lpfc_cancel_all_vport_retry_delay_timer(phba);

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2745 Failed to allocate mbox for "
				"requesting FCF rediscover.\n");
		return -ENOMEM;
	}

	length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
			 length, LPFC_SLI4_MBX_EMBED);

	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
	/* Set count to 0 for invalidating the entire FCF database */
	bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);

	/* Issue the mailbox command asynchronously */
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}

/**
 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
 * @phba: pointer to lpfc hba data structure.
 *
 * This function is the failover routine as a last resort to the FCF DEAD
 * event when the driver failed to perform fast FCF failover.
 **/
void
lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
{
	uint32_t link_state;

	/*
	 * Last resort as FCF DEAD event failover will treat this as
	 * a link down, but save the link state because we don't want
	 * it to be changed to Link Down unless it is already down.
	 */
	link_state = phba->link_state;
	lpfc_linkdown(phba);
	phba->link_state = link_state;

	/* Unregister FCF if no devices connected to it */
	lpfc_unregister_unused_fcf(phba);
}

/**
 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: pointer to the configuration region 23 data.
 *
 * This function gets SLI3 port configuration region 23 data through the
 * memory dump mailbox command. When it successfully retrieves data, the
 * size of the data will be returned, otherwise, 0 will be returned.
 **/
static uint32_t
lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	uint32_t offset = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2600 failed to allocate mailbox memory\n");
		return 0;
	}
	mb = &pmb->u.mb;

	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2601 failed to read config "
					"region 23, rc 0x%x Status 0x%x\n",
					rc, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/*
		 * Dump mem may return a zero when finished, or we got a
		 * mailbox error; either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;

		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      rgn23_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);

	mempool_free(pmb, phba->mbox_mem_pool);
	return offset;
}

/**
 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: pointer to the configuration region 23 data.
 *
 * This function gets SLI4 port configuration region 23 data through the
 * memory dump mailbox command. When it successfully retrieves data, the
 * size of the data will be returned, otherwise, 0 will be returned.
 **/
static uint32_t
lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *mboxq = NULL;
	struct lpfc_dmabuf *mp = NULL;
	struct lpfc_mqe *mqe;
	uint32_t data_length = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3105 failed to allocate mailbox memory\n");
		return 0;
	}

	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
		goto out;
	mqe = &mboxq->u.mqe;
	mp = (struct lpfc_dmabuf *) mboxq->context1;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc)
		goto out;
	data_length = mqe->un.mb_words[5];
	if (data_length == 0)
		goto out;
	if (data_length > DMP_RGN23_SIZE) {
		data_length = 0;
		goto out;
	}
	lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
out:
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
	return data_length;
}

/**
 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads region 23 and parses the TLV for port status to
 * decide if the user disabled the port. If the TLV indicates the
 * port is disabled, the hba_flag is set accordingly.
 **/
void
lpfc_sli_read_link_ste(struct lpfc_hba *phba)
{
	uint8_t *rgn23_data = NULL;
	uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
	uint32_t offset = 0;

	/* Get adapter Region 23 data */
	rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
	if (!rgn23_data)
		goto out;

	if (phba->sli_rev < LPFC_SLI_REV4)
		data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
	else {
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
			goto out;
		data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
	}

	if (!data_size)
		goto out;

	/* Check the region signature first */
	if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2619 Config region 23 has bad signature\n");
		goto out;
	}
	offset += 4;

	/* Check the data structure version */
	if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2620 Config region 23 has bad version\n");
		goto out;
	}
	offset += 4;

	/* Parse TLV entries in the region */
	while (offset < data_size) {
		if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
			break;
		/*
		 * If the TLV is not a driver-specific TLV or the driver id
		 * is not the Linux driver id, skip the record.
		 */
		if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
		    (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
		    (rgn23_data[offset + 3] != 0)) {
			offset += rgn23_data[offset + 1] * 4 + 4;
			continue;
		}

		/* Driver found a driver specific TLV in the config region */
		sub_tlv_len = rgn23_data[offset + 1] * 4;
		offset += 4;
		tlv_offset = 0;

		/*
		 * Search for configured port state sub-TLV.
		 */
		while ((offset < data_size) &&
		       (tlv_offset < sub_tlv_len)) {
			if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
				offset += 4;
				tlv_offset += 4;
				break;
			}
			if (rgn23_data[offset] != PORT_STE_TYPE) {
				offset += rgn23_data[offset + 1] * 4 + 4;
				tlv_offset += rgn23_data[offset + 1] * 4 + 4;
				continue;
			}

			/* This HBA contains PORT_STE configured */
			if (!rgn23_data[offset + 2])
				phba->hba_flag |= LINK_DISABLED;

			goto out;
		}
	}

out:
	kfree(rgn23_data);
	return;
}
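
/*
 * Region 23 TLV layout assumed by the walk above (editorial note):
 *
 *	byte 0:  record type (e.g. DRIVER_SPECIFIC_TYPE, PORT_STE_TYPE)
 *	byte 1:  record length in words
 *	byte 2+: record data (for PORT_STE, a zero in byte 2 marks the
 *		 port as user-disabled)
 *
 * which is why each skipped record advances offset by
 * rgn23_data[offset + 1] * 4 + 4 bytes.
 */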

/**
 * lpfc_wr_object - write an object to the firmware
 * @phba: HBA structure that indicates port to create a queue on.
 * @dmabuf_list: list of dmabufs to write to the port.
 * @size: the total byte value of the objects to write to the port.
 * @offset: the current offset to be used to start the transfer.
 *
 * This routine will create a wr_object mailbox command to send to the port.
 * The mailbox command will be constructed using the dma buffers described in
 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
 * BDEs as the embedded mailbox can support. The @offset variable will be
 * used to indicate the starting offset of the transfer and will also return
 * the offset after the write object mailbox has completed. @size is used to
 * determine the end of the object and whether the eof bit should be set.
 *
 * Return 0 if successful and @offset will contain the new offset to use
 * for the next write.
 * Return negative value for error cases.
 **/
int
lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
	       uint32_t size, uint32_t *offset)
{
	struct lpfc_mbx_wr_object *wr_object;
	LPFC_MBOXQ_t *mbox;
	int rc = 0, i = 0;
	uint32_t shdr_status, shdr_add_status;
	uint32_t mbox_tmo;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_dmabuf *dmabuf;
	uint32_t written = 0;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			LPFC_MBOX_OPCODE_WRITE_OBJECT,
			sizeof(struct lpfc_mbx_wr_object) -
			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
	wr_object->u.request.write_offset = *offset;
	sprintf((uint8_t *)wr_object->u.request.object_name, "/");
	wr_object->u.request.object_name[0] =
		cpu_to_le32(wr_object->u.request.object_name[0]);
	bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
	list_for_each_entry(dmabuf, dmabuf_list, list) {
		if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
			break;
		wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
		wr_object->u.request.bde[i].addrHigh =
			putPaddrHigh(dmabuf->phys);
		if (written + SLI4_PAGE_SIZE >= size) {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				(size - written);
			written += (size - written);
			bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
		} else {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				SLI4_PAGE_SIZE;
			written += SLI4_PAGE_SIZE;
		}
		i++;
	}
	wr_object->u.request.bde_count = i;
	bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3025 Write Object mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	} else
		*offset += wr_object->u.response.actual_write_length;
	return rc;
}
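
/*
 * Usage sketch (illustrative only): a firmware download would loop this
 * routine and let @offset carry the running position, passing the bytes
 * remaining so the eof bit lands on the final BDE, e.g.
 *
 *	uint32_t offset = 0;
 *
 *	while (offset < fw_size) {
 *		... stage the next piece of the image in dmabuf_list ...
 *		rc = lpfc_wr_object(phba, &dmabuf_list,
 *				    fw_size - offset, &offset);
 *		if (rc)
 *			break;
 *	}
 *
 * fw_size and the staging step are placeholders for the caller's own
 * image handling.
 */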

/**
 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
 * @vport: pointer to vport data structure.
 *
 * This function iterates through the mailboxq and cleans up all REG_LOGIN
 * and REG_VPI mailbox commands associated with the vport. This function
 * is called when the driver wants to restart discovery of the vport due to
 * a Clear Virtual Link event.
 **/
void
lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nodelist *act_mbx_ndlp = NULL;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LIST_HEAD(mbox_cmd_list);
	uint8_t restart_loop;

	/* Clean up internally queued mailbox commands with the vport */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if (mb->vport != vport)
			continue;

		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
		    (mb->u.mb.mbxCommand != MBX_REG_VPI))
			continue;

		list_del(&mb->list);
		list_add_tail(&mb->list, &mbox_cmd_list);
	}
	/* Clean up active mailbox command with the vport */
	mb = phba->sli.mbox_active;
	if (mb && (mb->vport == vport)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
		    (mb->u.mb.mbxCommand == MBX_REG_VPI))
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
			/* Put reference count for delayed processing */
			act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
			/* Unregister the RPI when mailbox complete */
			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
		}
	}
	/* Cleanup any mailbox completions which are not yet processed */
	do {
		restart_loop = 0;
		list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
			/*
			 * If this mailbox is already processed or it is
			 * for another vport ignore it.
			 */
			if ((mb->vport != vport) ||
			    (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
				continue;

			if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
			    (mb->u.mb.mbxCommand != MBX_REG_VPI))
				continue;

			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
				ndlp = (struct lpfc_nodelist *)mb->context2;
				/* Unregister the RPI when mailbox complete */
				mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
				restart_loop = 1;
				spin_unlock_irq(&phba->hbalock);
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				spin_lock_irq(&phba->hbalock);
				break;
			}
		}
	} while (restart_loop);

	spin_unlock_irq(&phba->hbalock);

	/* Release the cleaned-up mailbox commands */
	while (!list_empty(&mbox_cmd_list)) {
		list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			ndlp = (struct lpfc_nodelist *) mb->context2;
			mb->context2 = NULL;
			if (ndlp) {
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				lpfc_nlp_put(ndlp);
			}
		}
		mempool_free(mb, phba->mbox_mem_pool);
	}

	/* Release the ndlp with the cleaned-up active mailbox command */
	if (act_mbx_ndlp) {
		spin_lock(shost->host_lock);
		act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock(shost->host_lock);
		lpfc_nlp_put(act_mbx_ndlp);
	}
}

/**
 * lpfc_drain_txq - Drain the txq
 * @phba: Pointer to HBA context object.
 *
 * This function attempts to submit IOCBs on the txq
 * to the adapter. For SLI4 adapters, the txq contains
 * ELS IOCBs that have been deferred because there
 * are no available SGLs. This congestion can occur with large
 * vport counts during node discovery.
 **/

uint32_t
lpfc_drain_txq(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	struct lpfc_iocbq *piocbq = NULL;
	unsigned long iflags = 0;
	char *fail_msg = NULL;
	struct lpfc_sglq *sglq;
	union lpfc_wqe wqe;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (pring->txq_cnt > pring->txq_max)
		pring->txq_max = pring->txq_cnt;

	spin_unlock_irqrestore(&phba->hbalock, iflags);

	while (pring->txq_cnt) {
		spin_lock_irqsave(&phba->hbalock, iflags);

		piocbq = lpfc_sli_ringtx_get(phba, pring);
		if (!piocbq) {
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2823 txq empty and txq_cnt is %d\n ",
				pring->txq_cnt);
			break;
		}
		sglq = __lpfc_sli_get_sglq(phba, piocbq);
		if (!sglq) {
			__lpfc_sli_ringtx_put(phba, pring, piocbq);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			break;
		}

		/* With the xri and iocb resources secured,
		 * attempt to issue the request
		 */
		piocbq->sli4_lxritag = sglq->sli4_lxritag;
		piocbq->sli4_xritag = sglq->sli4_xritag;
		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
			fail_msg = "to convert bpl to sgl";
		else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
			fail_msg = "to convert iocb to wqe";
		else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
			fail_msg = " - Wq is full";
		else
			lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);

		if (fail_msg) {
			/* Failed means we can't issue and need to cancel */
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2822 IOCB failed %s iotag 0x%x "
					"xri 0x%x\n",
					fail_msg,
					piocbq->iotag, piocbq->sli4_xritag);
			list_add_tail(&piocbq->list, &completions);
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}

	/* Cancel all the IOCBs that cannot be issued */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return pring->txq_cnt;
}
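
/*
 * Editorial note: because the txq backlog exists only while SGLs are
 * exhausted, callers would typically invoke lpfc_drain_txq() right
 * after XRI/SGL resources are returned to the pool (for example from
 * an ELS XRI-aborted path), using the returned txq_cnt to tell whether
 * deferred IOCBs remain queued.
 */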