/* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver.
 * IO manager and SCSI IO processing.
 *
 * Copyright (c) 2008 - 2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */

#include "bnx2fc.h"

#define RESERVE_FREE_LIST_INDEX		num_possible_cpus()
static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
			   int bd_index);
static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
				 struct fcoe_fcp_rsp_payload *fcp_rsp,
				 u8 num_rq);
void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
			  unsigned int timer_msec)
{
	struct bnx2fc_interface *interface = io_req->port->priv;

	if (queue_delayed_work(interface->timer_work_queue,
			       &io_req->timeout_work,
			       msecs_to_jiffies(timer_msec)))
		kref_get(&io_req->refcount);
}
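/*
 * Editorial note on the timer reference protocol (summarizing the logic
 * above and in bnx2fc_cmd_timeout below): queue_delayed_work() returns
 * nonzero only when the work was actually queued, so the kref_get()
 * here takes exactly one "timer hold" per armed timer.  That hold is
 * dropped either by bnx2fc_cmd_timeout() when the timer fires, or by
 * whichever path successfully cancels the timer first, i.e.:
 *
 *	if (cancel_delayed_work(&io_req->timeout_work))
 *		kref_put(&io_req->refcount, bnx2fc_cmd_release);
 */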
static void bnx2fc_cmd_timeout(struct work_struct *work)
{
	struct bnx2fc_cmd *io_req = container_of(work, struct bnx2fc_cmd,
						 timeout_work.work);
	struct fc_lport *lport;
	struct fc_rport_priv *rdata;
	u8 cmd_type = io_req->cmd_type;
	struct bnx2fc_rport *tgt = io_req->tgt;
	int logo_issued;
	int rc;

	BNX2FC_IO_DBG(io_req, "cmd_timeout, cmd_type = %d, "
		      "req_flags = %lx\n", cmd_type, io_req->req_flags);

	spin_lock_bh(&tgt->tgt_lock);
	if (test_and_clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags)) {
		clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
		/*
		 * ideally we should hold the io_req until RRQ completes,
		 * and release io_req from timeout hold.
		 */
		spin_unlock_bh(&tgt->tgt_lock);
		bnx2fc_send_rrq(io_req);
		return;
	}
	if (test_and_clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags)) {
		BNX2FC_IO_DBG(io_req, "IO ready for reuse now\n");
		goto done;
	}

	switch (cmd_type) {
	case BNX2FC_SCSI_CMD:
		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
				       &io_req->req_flags)) {
			/* Handle eh_abort timeout */
			BNX2FC_IO_DBG(io_req, "eh_abort timed out\n");
			complete(&io_req->tm_done);
		} else if (test_bit(BNX2FC_FLAG_ISSUE_ABTS,
				    &io_req->req_flags)) {
			/* Handle internally generated ABTS timeout */
			BNX2FC_IO_DBG(io_req, "ABTS timed out refcnt = %d\n",
				      io_req->refcount.refcount.counter);
			if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
					       &io_req->req_flags))) {
				lport = io_req->port->lport;
				rdata = io_req->tgt->rdata;
				logo_issued = test_and_set_bit(
						BNX2FC_FLAG_EXPL_LOGO,
						&tgt->flags);
				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				/* Explicitly logo the target */
				if (!logo_issued) {
					BNX2FC_IO_DBG(io_req, "Explicit "
						  "logo - tgt flags = 0x%lx\n",
						  tgt->flags);

					mutex_lock(&lport->disc.disc_mutex);
					lport->tt.rport_logoff(rdata);
					mutex_unlock(&lport->disc.disc_mutex);
				}
				return;
			}
		} else {
			/* Handle IO timeout */
			BNX2FC_IO_DBG(io_req, "IO timed out. issue ABTS\n");
			if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL,
					     &io_req->req_flags)) {
				BNX2FC_IO_DBG(io_req, "IO completed before "
					      "timer expiry\n");
				goto done;
			}

			if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
					      &io_req->req_flags)) {
				rc = bnx2fc_initiate_abts(io_req);
				if (rc == SUCCESS)
					goto done;
				/*
				 * Explicitly logo the target if
				 * abts initiation fails
				 */
				lport = io_req->port->lport;
				rdata = io_req->tgt->rdata;
				logo_issued = test_and_set_bit(
						BNX2FC_FLAG_EXPL_LOGO,
						&tgt->flags);
				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				if (!logo_issued) {
					BNX2FC_IO_DBG(io_req, "Explicit "
						  "logo - tgt flags = 0x%lx\n",
						  tgt->flags);

					mutex_lock(&lport->disc.disc_mutex);
					lport->tt.rport_logoff(rdata);
					mutex_unlock(&lport->disc.disc_mutex);
				}
				return;
			} else {
				BNX2FC_IO_DBG(io_req, "IO already in "
					      "ABTS processing\n");
			}
		}
		break;
	case BNX2FC_ELS:
		if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
			BNX2FC_IO_DBG(io_req, "ABTS for ELS timed out\n");

			if (!test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
					      &io_req->req_flags)) {
				lport = io_req->port->lport;
				rdata = io_req->tgt->rdata;
				logo_issued = test_and_set_bit(
						BNX2FC_FLAG_EXPL_LOGO,
						&tgt->flags);
				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				/* Explicitly logo the target */
				if (!logo_issued) {
					BNX2FC_IO_DBG(io_req, "Explicitly logo"
						      "(els)\n");
					mutex_lock(&lport->disc.disc_mutex);
					lport->tt.rport_logoff(rdata);
					mutex_unlock(&lport->disc.disc_mutex);
				}
				return;
			}
		} else {
			/*
			 * Handle ELS timeout.
			 * tgt_lock is used to sync compl path and timeout
			 * path. If els compl path is processing this IO, we
			 * have nothing to do here, just release the timer hold
			 */
			BNX2FC_IO_DBG(io_req, "ELS timed out\n");
			if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
					     &io_req->req_flags))
				goto done;

			/* Indicate the cb_func that this ELS is timed out */
			set_bit(BNX2FC_FLAG_ELS_TIMEOUT, &io_req->req_flags);

			if ((io_req->cb_func) && (io_req->cb_arg)) {
				io_req->cb_func(io_req->cb_arg);
				io_req->cb_arg = NULL;
			}
		}
		break;
	default:
		printk(KERN_ERR PFX "cmd_timeout: invalid cmd_type %d\n",
		       cmd_type);
		break;
	}

done:
	/* release the cmd that was held when timer was set */
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	spin_unlock_bh(&tgt->tgt_lock);
}
static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
{
	/* Called with host lock held */
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;

	/*
	 * active_cmd_queue may have other command types as well,
	 * and during flush operation, we want to error back only
	 * scsi commands.
	 */
	if (io_req->cmd_type != BNX2FC_SCSI_CMD)
		return;

	BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code);
	if (test_bit(BNX2FC_FLAG_CMD_LOST, &io_req->req_flags)) {
		/* Do not call scsi done for this IO */
		return;
	}

	bnx2fc_unmap_sg_list(io_req);
	io_req->sc_cmd = NULL;
	if (!sc_cmd) {
		printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. "
				    "IO(0x%x) already cleaned up\n",
		       io_req->xid);
		return;
	}
	sc_cmd->result = err_code << 16;

	BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n",
		      sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries,
		      sc_cmd->allowed);
	scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
}
struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
					    u16 min_xid, u16 max_xid)
{
	struct bnx2fc_cmd_mgr *cmgr;
	struct io_bdt *bdt_info;
	struct bnx2fc_cmd *io_req;
	size_t len;
	u32 mem_size;
	u16 xid;
	int i;
	int num_ios, num_pri_ios;
	size_t bd_tbl_sz;
	int arr_sz = num_possible_cpus() + 1;

	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
		printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x "
		       "and max_xid 0x%x\n", min_xid, max_xid);
		return NULL;
	}
	BNX2FC_MISC_DBG("min xid 0x%x, max xid 0x%x\n", min_xid, max_xid);

	num_ios = max_xid - min_xid + 1;
	len = (num_ios * (sizeof(struct bnx2fc_cmd *)));
	len += sizeof(struct bnx2fc_cmd_mgr);

	cmgr = kzalloc(len, GFP_KERNEL);
	if (!cmgr) {
		printk(KERN_ERR PFX "failed to alloc cmgr\n");
		return NULL;
	}

	cmgr->free_list = kzalloc(sizeof(*cmgr->free_list) *
				  arr_sz, GFP_KERNEL);
	if (!cmgr->free_list) {
		printk(KERN_ERR PFX "failed to alloc free_list\n");
		goto mem_err;
	}

	cmgr->free_list_lock = kzalloc(sizeof(*cmgr->free_list_lock) *
				       arr_sz, GFP_KERNEL);
	if (!cmgr->free_list_lock) {
		printk(KERN_ERR PFX "failed to alloc free_list_lock\n");
		goto mem_err;
	}

	cmgr->hba = hba;
	cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);

	for (i = 0; i < arr_sz; i++) {
		INIT_LIST_HEAD(&cmgr->free_list[i]);
		spin_lock_init(&cmgr->free_list_lock[i]);
	}

	/*
	 * Pre-allocated pool of bnx2fc_cmds.
	 * Last entry in the free list array is the free list
	 * of slow path requests.
	 */
	xid = BNX2FC_MIN_XID;
	num_pri_ios = num_ios - BNX2FC_ELSTM_XIDS;
	for (i = 0; i < num_ios; i++) {
		io_req = kzalloc(sizeof(*io_req), GFP_KERNEL);

		if (!io_req) {
			printk(KERN_ERR PFX "failed to alloc io_req\n");
			goto mem_err;
		}

		INIT_LIST_HEAD(&io_req->link);
		INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout);

		io_req->xid = xid++;
		if (i < num_pri_ios)
			list_add_tail(&io_req->link,
				      &cmgr->free_list[io_req->xid %
						       num_possible_cpus()]);
		else
			list_add_tail(&io_req->link,
				      &cmgr->free_list[num_possible_cpus()]);
	}

	/* Allocate pool of io_bdts - one for each bnx2fc_cmd */
	mem_size = num_ios * sizeof(struct io_bdt *);
	cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL);
	if (!cmgr->io_bdt_pool) {
		printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
		goto mem_err;
	}

	mem_size = sizeof(struct io_bdt);
	for (i = 0; i < num_ios; i++) {
		cmgr->io_bdt_pool[i] = kmalloc(mem_size, GFP_KERNEL);
		if (!cmgr->io_bdt_pool[i]) {
			printk(KERN_ERR PFX "failed to alloc "
			       "io_bdt_pool[%d]\n", i);
			goto mem_err;
		}
	}

	/* Allocate and map fcoe_bdt_ctx structures */
	bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		bdt_info->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
						      bd_tbl_sz,
						      &bdt_info->bd_tbl_dma,
						      GFP_KERNEL);
		if (!bdt_info->bd_tbl) {
			printk(KERN_ERR PFX "failed to alloc "
			       "bdt_tbl[%d]\n", i);
			goto mem_err;
		}
	}

	return cmgr;

mem_err:
	bnx2fc_cmd_mgr_free(cmgr);
	return NULL;
}
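/*
 * Editorial sketch of the command-pool layout built above (derived from
 * the code itself): the manager keeps num_possible_cpus() + 1 free
 * lists.  The first N lists hold the fast-path SCSI commands, seeded by
 * xid % num_possible_cpus(); the extra list at index
 * num_possible_cpus() (RESERVE_FREE_LIST_INDEX) holds the last
 * BNX2FC_ELSTM_XIDS commands, reserved for the slow path (ELS and task
 * management).  For example, with 4 possible CPUs:
 *
 *	free_list[0..3]	- SCSI cmds, xids hashed across the 4 lists
 *	free_list[4]	- reserved ELS/TM cmds
 */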
void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr)
{
	struct io_bdt *bdt_info;
	struct bnx2fc_hba *hba = cmgr->hba;
	size_t bd_tbl_sz;
	u16 min_xid = BNX2FC_MIN_XID;
	u16 max_xid = BNX2FC_MAX_XID;
	int num_ios;
	int i;

	num_ios = max_xid - min_xid + 1;

	/* Free fcoe_bdt_ctx structures */
	if (!cmgr->io_bdt_pool)
		goto free_cmd_pool;

	bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		if (bdt_info->bd_tbl) {
			dma_free_coherent(&hba->pcidev->dev, bd_tbl_sz,
					  bdt_info->bd_tbl,
					  bdt_info->bd_tbl_dma);
			bdt_info->bd_tbl = NULL;
		}
	}

	/* Destroy io_bdt pool */
	for (i = 0; i < num_ios; i++) {
		kfree(cmgr->io_bdt_pool[i]);
		cmgr->io_bdt_pool[i] = NULL;
	}

	kfree(cmgr->io_bdt_pool);
	cmgr->io_bdt_pool = NULL;

free_cmd_pool:
	kfree(cmgr->free_list_lock);

	/* Destroy cmd pool */
	if (!cmgr->free_list)
		goto free_cmgr;

	for (i = 0; i < num_possible_cpus() + 1; i++) {
		struct bnx2fc_cmd *tmp, *io_req;

		list_for_each_entry_safe(io_req, tmp,
					 &cmgr->free_list[i], link) {
			list_del(&io_req->link);
			kfree(io_req);
		}
	}
	kfree(cmgr->free_list);

free_cmgr:
	/* Free command manager itself */
	kfree(cmgr);
}
struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
	struct bnx2fc_cmd *io_req;
	struct list_head *listp;
	struct io_bdt *bd_tbl;
	int index = RESERVE_FREE_LIST_INDEX;
	u32 free_sqes;
	u32 max_sqes;
	u16 xid;

	max_sqes = tgt->max_sqes;
	switch (type) {
	case BNX2FC_TASK_MGMT_CMD:
		max_sqes = BNX2FC_TM_MAX_SQES;
		break;
	case BNX2FC_ELS:
		max_sqes = BNX2FC_ELS_MAX_SQES;
		break;
	default:
		break;
	}

	/*
	 * NOTE: Free list insertions and deletions are protected with
	 * cmgr lock
	 */
	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	free_sqes = atomic_read(&tgt->free_sqes);
	if ((list_empty(&(cmd_mgr->free_list[index]))) ||
	    (tgt->num_active_ios.counter >= max_sqes) ||
	    (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
		BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available "
			       "ios(%d):sqes(%d)\n",
			       tgt->num_active_ios.counter, tgt->max_sqes);
		if (list_empty(&(cmd_mgr->free_list[index])))
			printk(KERN_ERR PFX "elstm_alloc: list_empty\n");
		spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
		return NULL;
	}

	listp = (struct list_head *)
			cmd_mgr->free_list[index].next;
	list_del_init(listp);
	io_req = (struct bnx2fc_cmd *) listp;
	xid = io_req->xid;
	cmd_mgr->cmds[xid] = io_req;
	atomic_inc(&tgt->num_active_ios);
	atomic_dec(&tgt->free_sqes);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);

	INIT_LIST_HEAD(&io_req->link);

	io_req->port = port;
	io_req->cmd_mgr = cmd_mgr;
	io_req->req_flags = 0;
	io_req->cmd_type = type;

	/* Bind io_bdt for this io_req */
	/* Have a static link between io_req and io_bdt_pool */
	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
	bd_tbl->io_req = io_req;

	/* Hold the io_req against deletion */
	kref_init(&io_req->refcount);
	return io_req;
}
struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
	struct bnx2fc_cmd *io_req;
	struct list_head *listp;
	struct io_bdt *bd_tbl;
	u32 free_sqes;
	u32 max_sqes;
	u16 xid;
	int index = get_cpu();

	max_sqes = BNX2FC_SCSI_MAX_SQES;
	/*
	 * NOTE: Free list insertions and deletions are protected with
	 * cmgr lock
	 */
	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	free_sqes = atomic_read(&tgt->free_sqes);
	if ((list_empty(&cmd_mgr->free_list[index])) ||
	    (tgt->num_active_ios.counter >= max_sqes) ||
	    (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
		spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
		put_cpu();
		return NULL;
	}

	listp = (struct list_head *)
		cmd_mgr->free_list[index].next;
	list_del_init(listp);
	io_req = (struct bnx2fc_cmd *) listp;
	xid = io_req->xid;
	cmd_mgr->cmds[xid] = io_req;
	atomic_inc(&tgt->num_active_ios);
	atomic_dec(&tgt->free_sqes);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
	put_cpu();

	INIT_LIST_HEAD(&io_req->link);

	io_req->port = port;
	io_req->cmd_mgr = cmd_mgr;
	io_req->req_flags = 0;

	/* Bind io_bdt for this io_req */
	/* Have a static link between io_req and io_bdt_pool */
	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
	bd_tbl->io_req = io_req;

	/* Hold the io_req against deletion */
	kref_init(&io_req->refcount);
	return io_req;
}
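/*
 * Editorial note: bnx2fc_cmd_alloc() above draws from the free list of
 * the current CPU (get_cpu()), while bnx2fc_cmd_release() below returns
 * a SCSI command to free_list[xid % num_possible_cpus()].  A command
 * can therefore migrate between per-CPU lists over its lifetime; that
 * is harmless, since any fast-path list may hold any SCSI-range xid,
 * and it keeps both paths cheap under their per-list locks.
 */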
void bnx2fc_cmd_release(struct kref *ref)
{
	struct bnx2fc_cmd *io_req = container_of(ref,
						 struct bnx2fc_cmd, refcount);
	struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
	int index;

	if (io_req->cmd_type == BNX2FC_SCSI_CMD)
		index = io_req->xid % num_possible_cpus();
	else
		index = RESERVE_FREE_LIST_INDEX;

	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	if (io_req->cmd_type != BNX2FC_SCSI_CMD)
		bnx2fc_free_mp_resc(io_req);
	cmd_mgr->cmds[io_req->xid] = NULL;
	/* Delete IO from retire queue */
	list_del_init(&io_req->link);
	/* Add it to the free list */
	list_add(&io_req->link,
		 &cmd_mgr->free_list[index]);
	atomic_dec(&io_req->tgt->num_active_ios);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
}
static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
	struct bnx2fc_interface *interface = io_req->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	size_t sz = sizeof(struct fcoe_bd_ctx);

	/* clear tm flags */
	mp_req->tm_flags = 0;
	if (mp_req->mp_req_bd) {
		dma_free_coherent(&hba->pcidev->dev, sz,
				  mp_req->mp_req_bd,
				  mp_req->mp_req_bd_dma);
		mp_req->mp_req_bd = NULL;
	}
	if (mp_req->mp_resp_bd) {
		dma_free_coherent(&hba->pcidev->dev, sz,
				  mp_req->mp_resp_bd,
				  mp_req->mp_resp_bd_dma);
		mp_req->mp_resp_bd = NULL;
	}
	if (mp_req->req_buf) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  mp_req->req_buf,
				  mp_req->req_buf_dma);
		mp_req->req_buf = NULL;
	}
	if (mp_req->resp_buf) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  mp_req->resp_buf,
				  mp_req->resp_buf_dma);
		mp_req->resp_buf = NULL;
	}
}
int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_mp_req *mp_req;
	struct fcoe_bd_ctx *mp_req_bd;
	struct fcoe_bd_ctx *mp_resp_bd;
	struct bnx2fc_interface *interface = io_req->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	u64 addr;
	size_t sz;

	mp_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
	memset(mp_req, 0, sizeof(struct bnx2fc_mp_req));

	mp_req->req_len = sizeof(struct fcp_cmnd);
	io_req->data_xfer_len = mp_req->req_len;
	mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
					     &mp_req->req_buf_dma,
					     GFP_ATOMIC);
	if (!mp_req->req_buf) {
		printk(KERN_ERR PFX "unable to alloc MP req buffer\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}

	mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
					      &mp_req->resp_buf_dma,
					      GFP_ATOMIC);
	if (!mp_req->resp_buf) {
		printk(KERN_ERR PFX "unable to alloc TM resp buffer\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}
	memset(mp_req->req_buf, 0, PAGE_SIZE);
	memset(mp_req->resp_buf, 0, PAGE_SIZE);

	/* Allocate and map mp_req_bd and mp_resp_bd */
	sz = sizeof(struct fcoe_bd_ctx);
	mp_req->mp_req_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
					       &mp_req->mp_req_bd_dma,
					       GFP_ATOMIC);
	if (!mp_req->mp_req_bd) {
		printk(KERN_ERR PFX "unable to alloc MP req bd\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}
	mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
						&mp_req->mp_resp_bd_dma,
						GFP_ATOMIC);
	if (!mp_req->mp_resp_bd) {
		printk(KERN_ERR PFX "unable to alloc MP resp bd\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}

	/* Fill bd table */
	addr = mp_req->req_buf_dma;
	mp_req_bd = mp_req->mp_req_bd;
	mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
	mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
	mp_req_bd->buf_len = PAGE_SIZE;
	mp_req_bd->flags = 0;

	/*
	 * MP buffer is either a task mgmt command or an ELS.
	 * So the assumption is that it consumes a single bd
	 * entry in the bd table
	 */
	mp_resp_bd = mp_req->mp_resp_bd;
	addr = mp_req->resp_buf_dma;
	mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff;
	mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32);
	mp_resp_bd->buf_len = PAGE_SIZE;
	mp_resp_bd->flags = 0;

	return SUCCESS;
}
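/*
 * Editorial example of the BD address packing used above: the 64-bit
 * DMA address is split into two 32-bit halves for the firmware
 * descriptor.  For addr = 0x0000000123456789:
 *
 *	buf_addr_lo = (u32)addr & 0xffffffff	   = 0x23456789
 *	buf_addr_hi = (u32)((u64)addr >> 32)	   = 0x00000001
 */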
static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
{
	struct fc_lport *lport;
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct fcoe_port *port;
	struct bnx2fc_interface *interface;
	struct bnx2fc_rport *tgt;
	struct bnx2fc_cmd *io_req;
	struct bnx2fc_mp_req *tm_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct Scsi_Host *host = sc_cmd->device->host;
	struct fc_frame_header *fc_hdr;
	struct fcp_cmnd *fcp_cmnd;
	int task_idx, index;
	int rc = SUCCESS;
	u16 xid;
	u32 sid, did;
	unsigned long start = jiffies;

	lport = shost_priv(host);
	port = lport_priv(lport);
	interface = port->priv;

	if (rport == NULL) {
		printk(KERN_ERR PFX "device_reset: rport is NULL\n");
		rc = FAILED;
		goto tmf_err;
	}

	rc = fc_block_scsi_eh(sc_cmd);
	if (rc)
		return rc;

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "device_reset: link is not ready\n");
		rc = FAILED;
		goto tmf_err;
	}
	/* rport and tgt are allocated together, so tgt should be non-NULL */
	tgt = (struct bnx2fc_rport *)&rp[1];

	if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
		printk(KERN_ERR PFX "device_reset: tgt not offloaded\n");
		rc = FAILED;
		goto tmf_err;
	}
retry_tmf:
	io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_TASK_MGMT_CMD);
	if (!io_req) {
		if (time_after(jiffies, start + HZ)) {
			printk(KERN_ERR PFX "tmf: Failed TMF");
			rc = FAILED;
			goto tmf_err;
		}
		msleep(20);
		goto retry_tmf;
	}
	/* Initialize rest of io_req fields */
	io_req->sc_cmd = sc_cmd;
	io_req->port = port;
	io_req->tgt = tgt;

	tm_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);

	rc = bnx2fc_init_mp_req(io_req);
	if (rc == FAILED) {
		printk(KERN_ERR PFX "Task mgmt MP request init failed\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		goto tmf_err;
	}

	/* Set TM flags */
	io_req->io_req_flags = 0;
	tm_req->tm_flags = tm_flags;

	/* Fill FCP_CMND */
	bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
	fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
	memset(fcp_cmnd->fc_cdb, 0, sc_cmd->cmd_len);
	fcp_cmnd->fc_dl = 0;

	/* Fill FC header */
	fc_hdr = &(tm_req->req_fc_hdr);

	sid = tgt->sid;
	did = rport->port_id;
	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, did, sid,
			 FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			 FC_FC_SEQ_INIT, 0);
	/* Obtain exchange id */
	xid = io_req->xid;

	BNX2FC_TGT_DBG(tgt, "Initiate TMF - xid = 0x%x\n", xid);
	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(io_req, task);

	sc_cmd->SCp.ptr = (char *)io_req;

	/* Obtain free SQ entry */
	spin_lock_bh(&tgt->tgt_lock);
	bnx2fc_add_2_sq(tgt, xid);

	/* Enqueue the io_req to active_tm_queue */
	io_req->on_tmf_queue = 1;
	list_add_tail(&io_req->link, &tgt->active_tm_queue);

	init_completion(&io_req->tm_done);
	io_req->wait_for_comp = 1;

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
	spin_unlock_bh(&tgt->tgt_lock);

	rc = wait_for_completion_timeout(&io_req->tm_done,
					 BNX2FC_TM_TIMEOUT * HZ);
	spin_lock_bh(&tgt->tgt_lock);

	io_req->wait_for_comp = 0;
	if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) {
		set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);
		if (io_req->on_tmf_queue) {
			list_del_init(&io_req->link);
			io_req->on_tmf_queue = 0;
		}
		io_req->wait_for_comp = 1;
		bnx2fc_initiate_cleanup(io_req);
		spin_unlock_bh(&tgt->tgt_lock);
		rc = wait_for_completion_timeout(&io_req->tm_done,
						 BNX2FC_FW_TIMEOUT);
		spin_lock_bh(&tgt->tgt_lock);
		io_req->wait_for_comp = 0;
		if (!rc)
			kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}

	spin_unlock_bh(&tgt->tgt_lock);

	if (!rc) {
		BNX2FC_TGT_DBG(tgt, "task mgmt command failed...\n");
		rc = FAILED;
	} else {
		BNX2FC_TGT_DBG(tgt, "task mgmt command success...\n");
		rc = SUCCESS;
	}
tmf_err:
	return rc;
}
int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
{
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct fc_rport *rport = tgt->rport;
	struct fc_rport_priv *rdata = tgt->rdata;
	struct bnx2fc_interface *interface;
	struct fcoe_port *port;
	struct bnx2fc_cmd *abts_io_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct fc_frame_header *fc_hdr;
	struct bnx2fc_mp_req *abts_req;
	int task_idx, index;
	u32 sid, did;
	u16 xid;
	int rc = SUCCESS;
	u32 r_a_tov = rdata->r_a_tov;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n");

	port = io_req->port;
	interface = port->priv;
	lport = port->lport;

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "initiate_abts: tgt not offloaded\n");
		rc = FAILED;
		goto abts_err;
	}

	if (rport == NULL) {
		printk(KERN_ERR PFX "initiate_abts: rport is NULL\n");
		rc = FAILED;
		goto abts_err;
	}

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "initiate_abts: link is not ready\n");
		rc = FAILED;
		goto abts_err;
	}

	abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS);
	if (!abts_io_req) {
		printk(KERN_ERR PFX "abts: couldn't allocate cmd\n");
		rc = FAILED;
		goto abts_err;
	}

	/* Initialize rest of io_req fields */
	abts_io_req->sc_cmd = NULL;
	abts_io_req->port = port;
	abts_io_req->tgt = tgt;
	abts_io_req->data_xfer_len = 0; /* No data transfer for ABTS */

	abts_req = (struct bnx2fc_mp_req *)&(abts_io_req->mp_req);
	memset(abts_req, 0, sizeof(struct bnx2fc_mp_req));

	/* Fill FC header */
	fc_hdr = &(abts_req->req_fc_hdr);

	/* Obtain oxid and rxid for the original exchange to be aborted */
	fc_hdr->fh_ox_id = htons(io_req->xid);
	fc_hdr->fh_rx_id = htons(io_req->task->rxwr_txrd.var_ctx.rx_id);

	sid = tgt->sid;
	did = rport->port_id;

	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_BA_ABTS, did, sid,
			 FC_TYPE_BLS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			 FC_FC_SEQ_INIT, 0);

	xid = abts_io_req->xid;
	BNX2FC_IO_DBG(abts_io_req, "ABTS io_req\n");
	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(abts_io_req, task);

	/*
	 * ABTS task is a temporary task that will be cleaned up
	 * irrespective of ABTS response. We need to start the timer
	 * for the original exchange, as the CQE is posted for the original
	 * IO request.
	 *
	 * Timer for ABTS is started only when it is originated by a
	 * TM request. For the ABTS issued as part of ULP timeout,
	 * scsi-ml maintains the timers.
	 */

	/* if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))*/
	bnx2fc_cmd_timer_set(io_req, 2 * r_a_tov);

	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);

abts_err:
	return rc;
}
int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
				enum fc_rctl r_ctl)
{
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;
	struct bnx2fc_interface *interface;
	struct fcoe_port *port;
	struct bnx2fc_cmd *seq_clnp_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	int task_idx, index;
	u16 xid;
	int rc = 0;

	BNX2FC_IO_DBG(orig_io_req, "bnx2fc_initiate_seq_cleanup xid = 0x%x\n",
		      orig_io_req->xid);
	kref_get(&orig_io_req->refcount);

	port = orig_io_req->port;
	interface = port->priv;
	lport = port->lport;

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to alloc cb_arg for seq clnup\n");
		rc = -ENOMEM;
		goto cleanup_err;
	}

	seq_clnp_req = bnx2fc_elstm_alloc(tgt, BNX2FC_SEQ_CLEANUP);
	if (!seq_clnp_req) {
		printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
		rc = -ENOMEM;
		kfree(cb_arg);
		goto cleanup_err;
	}
	/* Initialize rest of io_req fields */
	seq_clnp_req->sc_cmd = NULL;
	seq_clnp_req->port = port;
	seq_clnp_req->tgt = tgt;
	seq_clnp_req->data_xfer_len = 0; /* No data transfer for cleanup */

	xid = seq_clnp_req->xid;

	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	cb_arg->aborted_io_req = orig_io_req;
	cb_arg->io_req = seq_clnp_req;
	cb_arg->r_ctl = r_ctl;
	cb_arg->offset = offset;
	seq_clnp_req->cb_arg = cb_arg;

	printk(KERN_ERR PFX "call init_seq_cleanup_task\n");
	bnx2fc_init_seq_cleanup_task(seq_clnp_req, task, orig_io_req, offset);

	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
cleanup_err:
	return rc;
}
int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
{
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct bnx2fc_interface *interface;
	struct fcoe_port *port;
	struct bnx2fc_cmd *cleanup_io_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	int task_idx, index;
	u16 xid, orig_xid;
	int rc = 0;

	/* ASSUMPTION: called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n");

	port = io_req->port;
	interface = port->priv;
	lport = port->lport;

	cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
	if (!cleanup_io_req) {
		printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
		rc = -1;
		goto cleanup_err;
	}

	/* Initialize rest of io_req fields */
	cleanup_io_req->sc_cmd = NULL;
	cleanup_io_req->port = port;
	cleanup_io_req->tgt = tgt;
	cleanup_io_req->data_xfer_len = 0; /* No data transfer for cleanup */

	xid = cleanup_io_req->xid;

	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	orig_xid = io_req->xid;

	BNX2FC_IO_DBG(io_req, "CLEANUP io_req xid = 0x%x\n", xid);

	bnx2fc_init_cleanup_task(cleanup_io_req, task, orig_xid);

	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);

cleanup_err:
	return rc;
}
/**
 * bnx2fc_eh_target_reset: Reset a target
 *
 * @sc_cmd:	SCSI command
 *
 * Set from SCSI host template to send task mgmt command to the target
 *	and wait for the response
 */
int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd)
{
	return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
}
/**
 * bnx2fc_eh_device_reset - Reset a single LUN
 *
 * @sc_cmd:	SCSI command
 *
 * Set from SCSI host template to send task mgmt command to the target
 *	and wait for the response
 */
int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
{
	return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
}
int bnx2fc_expl_logo(struct fc_lport *lport, struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct fc_rport_priv *rdata = tgt->rdata;
	int logo_issued;
	int rc = SUCCESS;
	int wait_cnt = 0;

	BNX2FC_IO_DBG(io_req, "Expl logo - tgt flags = 0x%lx\n",
		      tgt->flags);
	logo_issued = test_and_set_bit(BNX2FC_FLAG_EXPL_LOGO,
				       &tgt->flags);
	io_req->wait_for_comp = 1;
	bnx2fc_initiate_cleanup(io_req);

	spin_unlock_bh(&tgt->tgt_lock);

	wait_for_completion(&io_req->tm_done);

	io_req->wait_for_comp = 0;
	/*
	 * release the reference taken in eh_abort to allow the
	 * target to re-login after flushing IOs
	 */
	kref_put(&io_req->refcount, bnx2fc_cmd_release);

	if (!logo_issued) {
		clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
		mutex_lock(&lport->disc.disc_mutex);
		lport->tt.rport_logoff(rdata);
		mutex_unlock(&lport->disc.disc_mutex);
		do {
			msleep(BNX2FC_RELOGIN_WAIT_TIME);
			if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT) {
				rc = FAILED;
				break;
			}
		} while (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags));
	}
	spin_lock_bh(&tgt->tgt_lock);
	return rc;
}
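/*
 * Editorial note: the relogin wait above is a simple bounded poll -
 * sleep BNX2FC_RELOGIN_WAIT_TIME ms per iteration and give up after
 * BNX2FC_RELOGIN_WAIT_CNT iterations if the session never returns to
 * SESSION_READY.  tgt_lock is deliberately dropped for the whole wait
 * (it is re-acquired just before returning), since the logoff and
 * relogin cannot make progress with the lock held.
 */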
/**
 * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
 *			SCSI command
 *
 * @sc_cmd:	SCSI_ML command pointer
 *
 * SCSI abort request handler
 */
int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct bnx2fc_cmd *io_req;
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt;
	int rc = FAILED;

	rc = fc_block_scsi_eh(sc_cmd);
	if (rc)
		return rc;

	lport = shost_priv(sc_cmd->device->host);
	if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
		printk(KERN_ERR PFX "eh_abort: link not ready\n");
		return rc;
	}

	tgt = (struct bnx2fc_rport *)&rp[1];

	BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n");

	spin_lock_bh(&tgt->tgt_lock);
	io_req = (struct bnx2fc_cmd *)sc_cmd->SCp.ptr;
	if (!io_req) {
		/* Command might have just completed */
		printk(KERN_ERR PFX "eh_abort: io_req is NULL\n");
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}
	BNX2FC_IO_DBG(io_req, "eh_abort - refcnt = %d\n",
		      io_req->refcount.refcount.counter);

	/* Hold IO request across abort processing */
	kref_get(&io_req->refcount);

	BUG_ON(tgt != io_req->tgt);

	/* Remove the io_req from the active_q. */
	/*
	 * Task Mgmt functions (LUN RESET & TGT RESET) will not
	 * issue an ABTS on this particular IO req, as the
	 * io_req is no longer in the active_q.
	 */
	if (tgt->flush_in_prog) {
		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
		       "flush in progress\n", io_req->xid);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}

	if (io_req->on_active_queue == 0) {
		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
		       "not on active_q\n", io_req->xid);
		/*
		 * This condition can happen only due to the FW bug,
		 * where we do not receive cleanup response from
		 * the FW. Handle this case gracefully by erroring
		 * back the IO request to SCSI-ml
		 */
		bnx2fc_scsi_done(io_req, DID_ABORT);

		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}

	/*
	 * Only eh_abort processing will remove the IO from
	 * active_cmd_q before processing the request. this is
	 * done to avoid race conditions between IOs aborted
	 * as part of task management completion and eh_abort
	 * processing
	 */
	list_del_init(&io_req->link);
	io_req->on_active_queue = 0;
	/* Move IO req to retire queue */
	list_add_tail(&io_req->link, &tgt->io_retire_queue);

	init_completion(&io_req->tm_done);

	if (test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
		       "already in abts processing\n", io_req->xid);
		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */
		rc = bnx2fc_expl_logo(lport, io_req);
		goto out;
	}

	/* Cancel the current timer running on this io_req */
	if (cancel_delayed_work(&io_req->timeout_work))
		kref_put(&io_req->refcount,
			 bnx2fc_cmd_release); /* drop timer hold */
	set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
	io_req->wait_for_comp = 1;
	rc = bnx2fc_initiate_abts(io_req);
	if (rc == FAILED) {
		bnx2fc_initiate_cleanup(io_req);
		spin_unlock_bh(&tgt->tgt_lock);
		wait_for_completion(&io_req->tm_done);
		spin_lock_bh(&tgt->tgt_lock);
		io_req->wait_for_comp = 0;
		goto done;
	}
	spin_unlock_bh(&tgt->tgt_lock);

	wait_for_completion(&io_req->tm_done);

	spin_lock_bh(&tgt->tgt_lock);
	io_req->wait_for_comp = 0;
	if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
			       &io_req->req_flags))) {
		/* Let the scsi-ml try to recover this command */
		printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
		       io_req->xid);
		rc = bnx2fc_expl_logo(lport, io_req);
		goto out;
	} else {
		/*
		 * We come here even when there was a race condition
		 * between timeout and abts completion, and abts
		 * completion happens just in time.
		 */
		BNX2FC_IO_DBG(io_req, "abort succeeded\n");
		rc = SUCCESS;
		bnx2fc_scsi_done(io_req, DID_ABORT);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}
done:
	/* release the reference taken in eh_abort */
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
out:
	spin_unlock_bh(&tgt->tgt_lock);
	return rc;
}
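/*
 * Editorial summary of the eh_abort reference counting above: the
 * handler takes one extra hold (kref_get) so the io_req cannot be
 * freed while it sleeps on tm_done with tgt_lock dropped.  Each exit
 * path then drops exactly the references it owns: the timer hold (when
 * cancel_delayed_work() succeeds), the command's own reference when the
 * abort completes it (bnx2fc_scsi_done + kref_put), and finally the
 * eh_abort hold itself at "done".  bnx2fc_expl_logo() consumes the
 * eh_abort hold on behalf of its callers, which is why those paths
 * jump to "out" instead.
 */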
void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnp_req,
				      struct fcoe_task_ctx_entry *task,
				      u8 rx_state)
{
	struct bnx2fc_els_cb_arg *cb_arg = seq_clnp_req->cb_arg;
	struct bnx2fc_cmd *orig_io_req = cb_arg->aborted_io_req;
	u32 offset = cb_arg->offset;
	enum fc_rctl r_ctl = cb_arg->r_ctl;
	int rc = 0;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;

	BNX2FC_IO_DBG(orig_io_req, "Entered process_cleanup_compl xid = 0x%x"
		      "cmd_type = %d\n",
		      seq_clnp_req->xid, seq_clnp_req->cmd_type);

	if (rx_state == FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP) {
		printk(KERN_ERR PFX "seq cleanup ignored - xid = 0x%x\n",
		       seq_clnp_req->xid);
		goto free_cb_arg;
	}

	spin_unlock_bh(&tgt->tgt_lock);
	rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
	spin_lock_bh(&tgt->tgt_lock);

	if (rc)
		printk(KERN_ERR PFX "clnup_compl: Unable to send SRR"
		       " IO will abort\n");
	seq_clnp_req->cb_arg = NULL;
	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
free_cb_arg:
	kfree(cb_arg);
	return;
}
void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
				  struct fcoe_task_ctx_entry *task,
				  u8 num_rq)
{
	BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl "
		      "refcnt = %d, cmd_type = %d\n",
		      io_req->refcount.refcount.counter, io_req->cmd_type);
	bnx2fc_scsi_done(io_req, DID_ERROR);
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	if (io_req->wait_for_comp)
		complete(&io_req->tm_done);
}
void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
			       struct fcoe_task_ctx_entry *task,
			       u8 num_rq)
{
	u32 r_ctl;
	u32 r_a_tov = FC_DEF_R_A_TOV;
	u8 issue_rrq = 0;
	struct bnx2fc_rport *tgt = io_req->tgt;

	BNX2FC_IO_DBG(io_req, "Entered process_abts_compl xid = 0x%x"
		      "refcnt = %d, cmd_type = %d\n",
		      io_req->xid,
		      io_req->refcount.refcount.counter, io_req->cmd_type);

	if (test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
			     &io_req->req_flags)) {
		BNX2FC_IO_DBG(io_req, "Timer context finished processing"
			      " this io\n");
		return;
	}

	/* Do not issue RRQ as this IO is already cleaned up */
	if (test_and_set_bit(BNX2FC_FLAG_IO_CLEANUP,
			     &io_req->req_flags))
		goto io_compl;

	/*
	 * For ABTS issued due to SCSI eh_abort_handler, timeout
	 * values are maintained by scsi-ml itself. Cancel timeout
	 * in case ABTS issued as part of task management function
	 * or due to FW error.
	 */
	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))
		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */

	r_ctl = (u8)task->rxwr_only.union_ctx.comp_info.abts_rsp.r_ctl;

	switch (r_ctl) {
	case FC_RCTL_BA_ACC:
		/*
		 * Don't release this cmd yet. It will be released
		 * after we get RRQ response
		 */
		BNX2FC_IO_DBG(io_req, "ABTS response - ACC Send RRQ\n");
		issue_rrq = 1;
		break;

	case FC_RCTL_BA_RJT:
		BNX2FC_IO_DBG(io_req, "ABTS response - RJT\n");
		break;
	default:
		printk(KERN_ERR PFX "Unknown ABTS response\n");
		break;
	}

	if (issue_rrq) {
		BNX2FC_IO_DBG(io_req, "Issue RRQ after R_A_TOV\n");
		set_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
	}
	set_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
	bnx2fc_cmd_timer_set(io_req, r_a_tov);

io_compl:
	if (io_req->wait_for_comp) {
		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
				       &io_req->req_flags))
			complete(&io_req->tm_done);
	} else {
		/*
		 * We end up here when ABTS is issued as
		 * in asynchronous context, i.e., as part
		 * of task management completion, or
		 * when FW error is received or when the
		 * ABTS is issued when the IO is timed
		 * out.
		 */

		if (io_req->on_active_queue) {
			list_del_init(&io_req->link);
			io_req->on_active_queue = 0;
			/* Move IO req to retire queue */
			list_add_tail(&io_req->link, &tgt->io_retire_queue);
		}
		bnx2fc_scsi_done(io_req, DID_ERROR);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}
}
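/*
 * Editorial note on the BA_ACC path above: on an accepted ABTS the
 * exchange id must not be reused until R_A_TOV expires, so the code
 * sets BNX2FC_FLAG_ISSUE_RRQ and BNX2FC_FLAG_RETIRE_OXID and re-arms
 * the command timer for r_a_tov.  When that timer fires,
 * bnx2fc_cmd_timeout() sees ISSUE_RRQ and sends the RRQ (Reinstate
 * Recovery Qualifier); only after the RRQ exchange does the xid return
 * to the free pool.
 */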
static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct bnx2fc_cmd *cmd, *tmp;
	int tm_lun = sc_cmd->device->lun;
	int rc = 0;
	int lun;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_lun_reset_cmpl\n");
	/*
	 * Walk through the active_ios queue and ABORT the IO
	 * that matches with the LUN that was reset
	 */
	list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
		BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n");
		lun = cmd->sc_cmd->device->lun;
		if (lun == tm_lun) {
			/* Initiate ABTS on this cmd */
			if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
					      &cmd->req_flags)) {
				/* cancel the IO timeout */
				if (cancel_delayed_work(&cmd->timeout_work))
					kref_put(&cmd->refcount,
						 bnx2fc_cmd_release);
							/* timer hold */
				rc = bnx2fc_initiate_abts(cmd);
				/* abts shouldn't fail in this context */
				WARN_ON(rc != SUCCESS);
			} else
				printk(KERN_ERR PFX "lun_rst: abts already in"
				       " progress for this IO 0x%x\n",
				       cmd->xid);
		}
	}
}
static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct bnx2fc_cmd *cmd, *tmp;
	int rc = 0;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_tgt_reset_cmpl\n");
	/*
	 * Walk through the active_ios queue and ABORT every IO
	 * that belongs to the target that was reset
	 */
	list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
		BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n");
		/* Initiate ABTS */
		if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
				      &cmd->req_flags)) {
			/* cancel the IO timeout */
			if (cancel_delayed_work(&cmd->timeout_work))
				kref_put(&cmd->refcount,
					 bnx2fc_cmd_release); /* timer hold */
			rc = bnx2fc_initiate_abts(cmd);
			/* abts shouldn't fail in this context */
			WARN_ON(rc != SUCCESS);
		} else
			printk(KERN_ERR PFX "tgt_rst: abts already in progress"
			       " for this IO 0x%x\n", cmd->xid);
	}
}
void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
			     struct fcoe_task_ctx_entry *task, u8 num_rq)
{
	struct bnx2fc_mp_req *tm_req;
	struct fc_frame_header *fc_hdr;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	u64 *hdr;
	u64 *temp_hdr;
	void *rsp_buf;

	/* Called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered process_tm_compl\n");

	if (!(test_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags)))
		set_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags);
	else {
		/* TM has already timed out and we got
		 * delayed completion. Ignore completion
		 * processing.
		 */
		return;
	}

	tm_req = &(io_req->mp_req);
	fc_hdr = &(tm_req->resp_fc_hdr);
	hdr = (u64 *)fc_hdr;
	temp_hdr = (u64 *)
		&task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
	hdr[0] = cpu_to_be64(temp_hdr[0]);
	hdr[1] = cpu_to_be64(temp_hdr[1]);
	hdr[2] = cpu_to_be64(temp_hdr[2]);

	tm_req->resp_len =
		task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;

	rsp_buf = tm_req->resp_buf;

	if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) {
		bnx2fc_parse_fcp_rsp(io_req,
				     (struct fcoe_fcp_rsp_payload *)
				     rsp_buf, num_rq);
		if (io_req->fcp_rsp_code == 0) {
			/* TM successful */
			if (tm_req->tm_flags & FCP_TMF_LUN_RESET)
				bnx2fc_lun_reset_cmpl(io_req);
			else if (tm_req->tm_flags & FCP_TMF_TGT_RESET)
				bnx2fc_tgt_reset_cmpl(io_req);
		}
	} else {
		printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n",
		       fc_hdr->fh_r_ctl);
	}
	if (!sc_cmd->SCp.ptr) {
		printk(KERN_ERR PFX "tm_compl: SCp.ptr is NULL\n");
		return;
	}
	switch (io_req->fcp_status) {
	case FC_GOOD:
		if (io_req->cdb_status == 0) {
			/* Good IO completion */
			sc_cmd->result = DID_OK << 16;
		} else {
			/* Transport status is good, SCSI status not good */
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
		}
		if (io_req->fcp_resid)
			scsi_set_resid(sc_cmd, io_req->fcp_resid);
		break;

	default:
		BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n",
			      io_req->fcp_status);
		break;
	}

	sc_cmd = io_req->sc_cmd;
	io_req->sc_cmd = NULL;

	/* check if the io_req exists in tgt's tmf_q */
	if (io_req->on_tmf_queue) {

		list_del_init(&io_req->link);
		io_req->on_tmf_queue = 0;
	} else {

		printk(KERN_ERR PFX "Command not on active_cmd_queue!\n");
		return;
	}

	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);

	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	if (io_req->wait_for_comp) {
		BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n");
		complete(&io_req->tm_done);
	}
}
static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
			   int bd_index)
{
	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
	int frag_size, sg_frags;

	sg_frags = 0;
	while (sg_len) {
		if (sg_len >= BNX2FC_BD_SPLIT_SZ)
			frag_size = BNX2FC_BD_SPLIT_SZ;
		else
			frag_size = sg_len;
		bd[bd_index + sg_frags].buf_addr_lo = addr & 0xffffffff;
		bd[bd_index + sg_frags].buf_addr_hi = addr >> 32;
		bd[bd_index + sg_frags].buf_len = (u16)frag_size;
		bd[bd_index + sg_frags].flags = 0;

		addr += (u64) frag_size;
		sg_frags++;
		sg_len -= frag_size;
	}
	return sg_frags;
}
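/*
 * Editorial example of the split arithmetic above, assuming for
 * illustration that BNX2FC_BD_SPLIT_SZ is 32KB (0x8000 - check the
 * driver header for the actual value): an sg element of 72KB starting
 * at DMA address A becomes three BDs - 32KB @ A, 32KB @ A + 0x8000,
 * 8KB @ A + 0x10000 - and the function returns sg_frags = 3.
 */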
static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_interface *interface = io_req->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
	struct scatterlist *sg;
	int byte_count = 0;
	int sg_count = 0;
	int bd_count = 0;
	int sg_frags;
	unsigned int sg_len;
	u64 addr;
	int i;

	sg_count = dma_map_sg(&hba->pcidev->dev, scsi_sglist(sc),
			      scsi_sg_count(sc), sc->sc_data_direction);
	scsi_for_each_sg(sc, sg, sg_count, i) {
		sg_len = sg_dma_len(sg);
		addr = sg_dma_address(sg);
		if (sg_len > BNX2FC_MAX_BD_LEN) {
			sg_frags = bnx2fc_split_bd(io_req, addr, sg_len,
						   bd_count);
		} else {

			sg_frags = 1;
			bd[bd_count].buf_addr_lo = addr & 0xffffffff;
			bd[bd_count].buf_addr_hi = addr >> 32;
			bd[bd_count].buf_len = (u16)sg_len;
			bd[bd_count].flags = 0;
		}
		bd_count += sg_frags;
		byte_count += sg_len;
	}
	if (byte_count != scsi_bufflen(sc))
		printk(KERN_ERR PFX "byte_count = %d != scsi_bufflen = %d, "
		       "task_id = 0x%x\n", byte_count, scsi_bufflen(sc),
		       io_req->xid);
	return bd_count;
}
static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
	int bd_count;

	if (scsi_sg_count(sc)) {
		bd_count = bnx2fc_map_sg(io_req);
		if (bd_count == 0)
			return -ENOMEM;
	} else {
		bd_count = 0;
		bd[0].buf_addr_lo = bd[0].buf_addr_hi = 0;
		bd[0].buf_len = bd[0].flags = 0;
	}
	io_req->bd_tbl->bd_valid = bd_count;

	return 0;
}
static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;

	if (io_req->bd_tbl->bd_valid && sc) {
		scsi_dma_unmap(sc);
		io_req->bd_tbl->bd_valid = 0;
	}
}
void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
			   struct fcp_cmnd *fcp_cmnd)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	char tag[2];

	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));

	int_to_scsilun(sc_cmd->device->lun, &fcp_cmnd->fc_lun);

	fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
	memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);

	fcp_cmnd->fc_cmdref = 0;
	fcp_cmnd->fc_pri_ta = 0;
	fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
	fcp_cmnd->fc_flags = io_req->io_req_flags;

	if (scsi_populate_tag_msg(sc_cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->fc_pri_ta = FCP_PTA_HEADQ;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->fc_pri_ta = FCP_PTA_ORDERED;
			break;
		default:
			fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
			break;
		}
	} else {
		fcp_cmnd->fc_pri_ta = 0;
	}
}
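/*
 * Editorial note: this builder serves both regular SCSI commands and
 * task management requests.  fc_dl carries the data length in network
 * byte order (htonl), fc_tm_flags comes from the mp_req (zero for a
 * normal command), and bnx2fc_initiate_tmf() above reuses the routine
 * and then clears the CDB bytes and fc_dl, since a TMF carries neither.
 */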
static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
				 struct fcoe_fcp_rsp_payload *fcp_rsp,
				 u8 num_rq)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct bnx2fc_rport *tgt = io_req->tgt;
	u8 rsp_flags = fcp_rsp->fcp_flags.flags;
	u32 rq_buff_len = 0;
	int i;
	unsigned char *rq_data;
	unsigned char *dummy;
	int fcp_sns_len = 0;
	int fcp_rsp_len = 0;

	io_req->fcp_status = FC_GOOD;
	io_req->fcp_resid = fcp_rsp->fcp_resid;

	io_req->scsi_comp_flags = rsp_flags;
	CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
				fcp_rsp->scsi_status_code;

	/* Fetch fcp_rsp_info and fcp_sns_info if available */
	if (num_rq) {

		/*
		 * We do not anticipate num_rq >1, as the linux defined
		 * SCSI_SENSE_BUFFERSIZE is 96 bytes + 8 bytes of FCP_RSP_INFO
		 * 256 bytes of single rq buffer is good enough to hold this.
		 */

		if (rsp_flags &
		    FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID) {
			fcp_rsp_len = rq_buff_len
					= fcp_rsp->fcp_rsp_len;
		}

		if (rsp_flags &
		    FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID) {
			fcp_sns_len = fcp_rsp->fcp_sns_len;
			rq_buff_len += fcp_rsp->fcp_sns_len;
		}

		io_req->fcp_rsp_len = fcp_rsp_len;
		io_req->fcp_sns_len = fcp_sns_len;

		if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) {
			/* Invalid sense length. */
			printk(KERN_ERR PFX "invalid sns length %d\n",
			       rq_buff_len);
			/* reset rq_buff_len */
			rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ;
		}

		rq_data = bnx2fc_get_next_rqe(tgt, 1);

		if (num_rq > 1) {
			/* We do not need extra sense data */
			for (i = 1; i < num_rq; i++)
				dummy = bnx2fc_get_next_rqe(tgt, 1);
		}

		/* fetch fcp_rsp_code */
		if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
			/* Only for task management function */
			io_req->fcp_rsp_code = rq_data[3];
			printk(KERN_ERR PFX "fcp_rsp_code = %d\n",
			       io_req->fcp_rsp_code);
		}

		/* fetch sense data */
		rq_data += fcp_rsp_len;

		if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
			printk(KERN_ERR PFX "Truncating sense buffer\n");
			fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
		}

		memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (fcp_sns_len)
			memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);

		/* return RQ entries */
		for (i = 0; i < num_rq; i++)
			bnx2fc_return_rqe(tgt, 1);
	}
}
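/*
 * Editorial example of the RQ accounting above: with fcp_rsp_len = 8
 * (FCP_RSP_INFO for a TMF) and fcp_sns_len = 32, rq_buff_len = 40,
 * which fits in a single 256-byte RQ buffer (num_rq = 1, matching the
 * comment in the code).  One RQE is consumed and returned; the
 * fcp_rsp_code byte is rq_data[3], and the sense data starts at
 * rq_data + fcp_rsp_len.
 */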
/**
 * bnx2fc_queuecommand - Queuecommand function of the scsi template
 *
 * @host:	The Scsi_Host the command was issued to
 * @sc_cmd:	struct scsi_cmnd to be executed
 *
 * This is the IO strategy routine, called by SCSI-ML
 **/
int bnx2fc_queuecommand(struct Scsi_Host *host,
			struct scsi_cmnd *sc_cmd)
{
	struct fc_lport *lport = shost_priv(host);
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct bnx2fc_rport *tgt;
	struct bnx2fc_cmd *io_req;
	int rc = 0;
	int rval;

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		sc_cmd->result = rval;
		sc_cmd->scsi_done(sc_cmd);
		return 0;
	}

	if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}

	/* rport and tgt are allocated together, so tgt should be non-NULL */
	tgt = (struct bnx2fc_rport *)&rp[1];

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		/*
		 * Session is not offloaded yet. Let SCSI-ml retry
		 * the command
		 */
		rc = SCSI_MLQUEUE_TARGET_BUSY;
		goto exit_qcmd;
	}

	io_req = bnx2fc_cmd_alloc(tgt);
	if (!io_req) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}
	io_req->sc_cmd = sc_cmd;

	if (bnx2fc_post_io_req(tgt, io_req)) {
		printk(KERN_ERR PFX "Unable to post io_req\n");
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}
exit_qcmd:
	return rc;
}
void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
				   struct fcoe_task_ctx_entry *task,
				   u8 num_rq)
{
	struct fcoe_fcp_rsp_payload *fcp_rsp;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct scsi_cmnd *sc_cmd;
	struct Scsi_Host *host;

	/* scsi_cmd_cmpl is called with tgt lock held */

	if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
		/* we will not receive ABTS response for this IO */
		BNX2FC_IO_DBG(io_req, "Timer context finished processing "
			      "this scsi cmd\n");
	}

	/* Cancel the timeout_work, as we received IO completion */
	if (cancel_delayed_work(&io_req->timeout_work))
		kref_put(&io_req->refcount,
			 bnx2fc_cmd_release); /* drop timer hold */

	sc_cmd = io_req->sc_cmd;
	if (sc_cmd == NULL) {
		printk(KERN_ERR PFX "scsi_cmd_compl - sc_cmd is NULL\n");
		return;
	}

	/* Fetch fcp_rsp from task context and perform cmd completion */
	fcp_rsp = (struct fcoe_fcp_rsp_payload *)
		   &(task->rxwr_only.union_ctx.comp_info.fcp_rsp.payload);

	/* parse fcp_rsp and obtain sense data from RQ if available */
	bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq);

	host = sc_cmd->device->host;
	if (!sc_cmd->SCp.ptr) {
		printk(KERN_ERR PFX "SCp.ptr is NULL\n");
		return;
	}

	if (io_req->on_active_queue) {
		list_del_init(&io_req->link);
		io_req->on_active_queue = 0;
		/* Move IO req to retire queue */
		list_add_tail(&io_req->link, &tgt->io_retire_queue);
	} else {
		/* This should not happen, but could have been pulled
		 * by bnx2fc_flush_active_ios(), or during a race
		 * between command abort and (late) completion.
		 */
		BNX2FC_IO_DBG(io_req, "xid not on active_cmd_queue\n");
		if (io_req->wait_for_comp)
			if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
					       &io_req->req_flags))
				complete(&io_req->tm_done);
	}

	bnx2fc_unmap_sg_list(io_req);
	io_req->sc_cmd = NULL;

	switch (io_req->fcp_status) {
	case FC_GOOD:
		if (io_req->cdb_status == 0) {
			/* Good IO completion */
			sc_cmd->result = DID_OK << 16;
		} else {
			/* Transport status is good, SCSI status not good */
			BNX2FC_IO_DBG(io_req, "scsi_cmpl: cdb_status = %d"
				      " fcp_resid = 0x%x\n",
				      io_req->cdb_status, io_req->fcp_resid);
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
		}
		if (io_req->fcp_resid)
			scsi_set_resid(sc_cmd, io_req->fcp_resid);
		break;
	default:
		printk(KERN_ERR PFX "scsi_cmd_compl: fcp_status = %d\n",
		       io_req->fcp_status);
		break;
	}
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
}
int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
		       struct bnx2fc_cmd *io_req)
{
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct fc_lport *lport = port->lport;
	struct fc_stats *stats;
	int task_idx, index;
	u16 xid;

	/* Initialize rest of io_req fields */
	io_req->cmd_type = BNX2FC_SCSI_CMD;
	io_req->port = port;
	io_req->tgt = tgt;
	io_req->data_xfer_len = scsi_bufflen(sc_cmd);
	sc_cmd->SCp.ptr = (char *)io_req;

	stats = per_cpu_ptr(lport->stats, get_cpu());
	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		io_req->io_req_flags = BNX2FC_READ;
		stats->InputRequests++;
		stats->InputBytes += io_req->data_xfer_len;
	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
		io_req->io_req_flags = BNX2FC_WRITE;
		stats->OutputRequests++;
		stats->OutputBytes += io_req->data_xfer_len;
	} else {
		io_req->io_req_flags = 0;
		stats->ControlRequests++;
	}
	put_cpu();

	xid = io_req->xid;

	/* Build buffer descriptor list for firmware from sg list */
	if (bnx2fc_build_bd_list_from_sg(io_req)) {
		printk(KERN_ERR PFX "BD list creation failed\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return -EAGAIN;
	}

	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_task(io_req, task);

	spin_lock_bh(&tgt->tgt_lock);

	if (tgt->flush_in_prog) {
		printk(KERN_ERR PFX "Flush in progress..Host Busy\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return -EAGAIN;
	}

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "Session not ready...post_io\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return -EAGAIN;
	}

	/* Time IO req */
	if (tgt->io_timeout)
		bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT);
	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Enqueue the io_req to active_cmd_queue */

	io_req->on_active_queue = 1;
	/* move io_req from pending_queue to active_queue */
	list_add_tail(&io_req->link, &tgt->active_cmd_queue);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
	spin_unlock_bh(&tgt->tgt_lock);
	return 0;
}