/*
 *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 *  based on qla2x00t.c code:
 *
 *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 *  Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *
 *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "qla_def.h"
#include "qla_target.h"

static int ql2xtgt_tape_enable;
module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xtgt_tape_enable,
	"Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");

static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
	"Determines when initiator mode will be enabled. Possible values: "
	"\"exclusive\" - initiator mode will be enabled on load, "
	"disabled on enabling target mode and then on disabling target mode "
	"enabled back; "
	"\"disabled\" - initiator mode will never be enabled; "
	"\"enabled\" (default) - initiator mode will always stay enabled.");

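/*
 * Usage note (illustrative example, not part of the original source): both
 * parameters above are ordinary module parameters, so they can be set at
 * load time, e.g. "modprobe qla2xxx qlini_mode=disabled ql2xtgt_tape_enable=1",
 * or through an options line in /etc/modprobe.d/.
 */
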
int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

static int temp_sam_status = SAM_STAT_BUSY;

/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,
	FCP_DATA_LEN_INVALID = 1,
	FCP_CMND_FIELDS_INVALID = 2,
	FCP_DATA_PARAM_MISMATCH = 3,
	FCP_TMF_REJECTED = 4,
	FCP_TMF_FAILED = 5,
	FCP_TMF_INVALID_LUN = 9,
};

/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE		0	/* simple task attribute */
#define FCP_PTA_HEADQ		1	/* head of queue task attribute */
#define FCP_PTA_ORDERED		2	/* ordered task attribute */
#define FCP_PTA_ACA		4	/* auto. contingent allegiance */
#define FCP_PTA_MASK		7	/* mask for task attribute field */
#define FCP_PRI_SHIFT		3	/* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK	0x80	/* reserved bits in priority field */

/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under HW lock and could unlock/lock it inside.
 * It isn't an issue, since in the current implementation, at the time those
 * functions are called:
 *
 * - Either the context is IRQ and only the IRQ handler can modify HW data,
 *   including rings related fields,
 *
 * - Or access to target mode variables from struct qla_tgt doesn't
 *   cross those functions' boundaries, except tgt_stop, which is
 *   additionally protected by irq_cmd_count.
 */
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
	struct atio_from_isp *pkt);
static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
	int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
	*cmd, struct atio_from_isp *atio, int ha_locked);
static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
	struct qla_tgt_srr_imm *imm, int ha_lock);
static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd);
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull);
static void qlt_disable_vha(struct scsi_qla_host *vha);
/*
 * Global Variables
 */
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);

/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
static struct qla_tgt_sess *qlt_find_sess_by_port_name(
	struct qla_tgt *tgt,
	const uint8_t *port_name)
{
	struct qla_tgt_sess *sess;

	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
		if (!memcmp(sess->port_name, port_name, WWN_SIZE))
			return sess;
	}

	return NULL;
}

/* Might release hw lock, then reacquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
	/* Send marker if required */
	if (unlikely(vha->marker_needed != 0)) {
		int rc = qla2x00_issue_marker(vha, vha_locked);
		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
			    "qla_target(%d): issue_marker() failed\n",
			    vha->vp_idx);
		}
		return rc;
	}
	return QLA_SUCCESS;
}

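/*
 * Resolve the scsi_qla_host (base port or NPIV vport) that owns the given
 * 24-bit FC destination ID; returns NULL if no registered vport matches.
 */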
static inline
struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
	uint8_t *d_id)
{
	struct qla_hw_data *ha = vha->hw;
	uint8_t vp_idx;

	if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
		return NULL;

	if (vha->d_id.b.al_pa == d_id[2])
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}

static inline
struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
	uint16_t vp_idx)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->vp_idx == vp_idx)
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}

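/*
 * Track the number of outstanding target-mode commands under q_full_lock and
 * record the high-water mark in qla_stats.stat_max_pend_cmds.
 */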
static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);

	vha->hw->tgt.num_pend_cmds++;
	if (vha->hw->tgt.num_pend_cmds > vha->hw->qla_stats.stat_max_pend_cmds)
		vha->hw->qla_stats.stat_max_pend_cmds =
			vha->hw->tgt.num_pend_cmds;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}
static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	vha->hw->tgt.num_pend_cmds--;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}

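/*
 * Route an incoming ATIO to the scsi_qla_host that owns it (by d_id for
 * ATIO_TYPE7, by vp_index for immediate notifies) and hand it to
 * qlt_24xx_atio_pkt().
 */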
static void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	ql_dbg(ql_dbg_tgt, vha, 0xe072,
		"%s: qla_target(%d): type %x ox_id %04x\n",
		__func__, vha->vp_idx, atio->u.raw.entry_type,
		be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);
		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id[0],
			    atio->u.isp24.fcp_hdr.d_id[1],
			    atio->u.isp24.fcp_hdr.d_id[2]);
			break;
		}
		qlt_24xx_atio_pkt(host, atio);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != 0xFFFF)) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return;
}

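/*
 * Same routing for response-queue packets: look up the owning vport by
 * vp_index (CTIO, immediate notify, notify ack, ABTS) and pass the packet
 * on to qlt_response_pkt().
 */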
void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
{
	switch (pkt->entry_type) {
	case CTIO_CRC2:
		ql_dbg(ql_dbg_tgt, vha, 0xe073,
			"qla_target(%d):%s: CRC2 Response pkt\n",
			vha->vp_idx, __func__);
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe041,
			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)pkt;

		host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe042,
			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->u.isp24.vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case NOTIFY_ACK_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

		if (0xFF != entry->u.isp24.vp_index) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe043,
				    "qla_target(%d): Response "
				    "pkt (NOTIFY_ACK_TYPE) "
				    "received, with unknown "
				    "vp_index %d\n", vha->vp_idx,
				    entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe044,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RESP_24XX:
	{
		struct abts_resp_to_24xx *entry =
		    (struct abts_resp_to_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe045,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	default:
		qlt_response_pkt(vha, pkt);
		break;
	}

}

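/*
 * Deferred final teardown of a target session: release the fabric se_session,
 * free the qla_tgt_sess and wake up anyone waiting for sess_count to drop to
 * zero (see qlt_stop_phase1()).
 */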
static void qlt_free_session_done(struct work_struct *work)
{
	struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;

	BUG_ON(!tgt);
	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
	    "Unregistration of sess %p finished\n", sess);

	kfree(sess);
	/*
	 * We need to protect against race, when tgt is freed before or
	 * inside wake_up()
	 */
	tgt->sess_count--;
	if (tgt->sess_count == 0)
		wake_up_all(&tgt->waitQ);
}

/* ha->hardware_lock supposed to be held on entry */
void qlt_unreg_sess(struct qla_tgt_sess *sess)
{
	struct scsi_qla_host *vha = sess->vha;

	vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	list_del(&sess->sess_list_entry);
	if (sess->deleted)
		list_del(&sess->del_list_entry);

	INIT_WORK(&sess->free_work, qlt_free_session_done);
	schedule_work(&sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);

/* ha->hardware_lock supposed to be held on entry */
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	uint32_t unpacked_lun, lun = 0;
	uint16_t loop_id;
	int res = 0;
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
#if 0 /* FIXME: Re-enable Global event handling.. */
		/* Global event */
		atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
		qlt_clear_tgt_db(ha->tgt.qla_tgt);
		if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
			sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
			    typeof(*sess), sess_list_entry);
			switch (mcmd) {
			case QLA_TGT_NEXUS_LOSS_SESS:
				mcmd = QLA_TGT_NEXUS_LOSS;
				break;
			case QLA_TGT_ABORT_ALL_SESS:
				mcmd = QLA_TGT_ABORT_ALL;
				break;
			case QLA_TGT_NEXUS_LOSS:
			case QLA_TGT_ABORT_ALL:
				break;
			default:
				ql_dbg(ql_dbg_tgt, vha, 0xe046,
				    "qla_target(%d): Not allowed "
				    "command %x in %s", vha->vp_idx,
				    mcmd, __func__);
				sess = NULL;
				break;
			}
		} else
			sess = NULL;
#endif
	} else {
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess) {
		res = -ESRCH;
		return res;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
	    "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
	    "loop_id %d)\n", vha->host_no, sess, sess->port_name,
	    mcmd, loop_id);

	lun = a->u.isp24.fcp_cmnd.lun;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
	    iocb, QLA24XX_MGMT_SEND_NACK);
}

/* ha->hardware_lock supposed to be held on entry */
static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
	bool immediate)
{
	struct qla_tgt *tgt = sess->tgt;
	uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;

	if (sess->deleted)
		return;

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
	    "Scheduling sess %p for deletion\n", sess);
	list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
	sess->deleted = 1;

	if (immediate)
		dev_loss_tmo = 0;

	sess->expires = jiffies + dev_loss_tmo * HZ;

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
	    "qla_target(%d): session for port %8phC (loop ID %d) scheduled for "
	    "deletion in %u secs (expires: %lu) immed: %d\n",
	    sess->vha->vp_idx, sess->port_name, sess->loop_id, dev_loss_tmo,
	    sess->expires, immediate);

	if (immediate)
		schedule_delayed_work(&tgt->sess_del_work, 0);
	else
		schedule_delayed_work(&tgt->sess_del_work,
		    sess->expires - jiffies);
}

/* ha->hardware_lock supposed to be held on entry */
static void qlt_clear_tgt_db(struct qla_tgt *tgt)
{
	struct qla_tgt_sess *sess;

	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry)
		qlt_schedule_sess_for_deletion(sess, true);

	/* At this point tgt could be already dead */
}

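/*
 * Translate an FC S_ID into the firmware loop ID by fetching the list of
 * logged-in ports via qla2x00_get_id_list(); returns 0 on success, negative
 * on error or if no matching port is found.
 */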
static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
	uint16_t *loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t gid_list_dma;
	struct gid_list_info *gid_list;
	char *id_iter;
	int res, rc, i;
	uint16_t entries;

	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
		    "qla_target(%d): DMA Alloc failed of %u\n",
		    vha->vp_idx, qla2x00_gid_list_size(ha));
		return -ENOMEM;
	}

	/* Get list of logged in devices */
	rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
		    "qla_target(%d): get_id_list() failed: %x\n",
		    vha->vp_idx, rc);
		res = -1;
		goto out_free_id_list;
	}

	id_iter = (char *)gid_list;
	res = -1;
	for (i = 0; i < entries; i++) {
		struct gid_list_info *gid = (struct gid_list_info *)id_iter;
		if ((gid->al_pa == s_id[2]) &&
		    (gid->area == s_id[1]) &&
		    (gid->domain == s_id[0])) {
			*loop_id = le16_to_cpu(gid->loop_id);
			res = 0;
			break;
		}
		id_iter += ha->gid_list_info_size;
	}

out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);
	return res;
}

/* ha->hardware_lock supposed to be held on entry */
static void qlt_undelete_sess(struct qla_tgt_sess *sess)
{
	BUG_ON(!sess->deleted);

	list_del(&sess->del_list_entry);
	sess->deleted = 0;
}

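/*
 * Delayed work that walks tgt->del_sess_list and shuts down sessions whose
 * dev_loss timeout has expired; a not-yet-expired head entry re-arms the work
 * for the remaining time.
 */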
static void qlt_del_sess_work_fn(struct delayed_work *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt,
	    sess_del_work);
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	unsigned long flags, elapsed;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (!list_empty(&tgt->del_sess_list)) {
		sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
		    del_list_entry);
		elapsed = jiffies;
		if (time_after_eq(elapsed, sess->expires)) {
			qlt_undelete_sess(sess);

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
			    "Timeout: sess %p about to be deleted\n",
			    sess);
			ha->tgt.tgt_ops->shutdown_sess(sess);
			ha->tgt.tgt_ops->put_sess(sess);
		} else {
			schedule_delayed_work(&tgt->sess_del_work,
			    sess->expires - elapsed);
			break;
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

/*
 * Adds an extra ref to allow dropping the hw lock after adding sess to the
 * list. Caller must put it.
 */
static struct qla_tgt_sess *qlt_create_sess(
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	unsigned long flags;
	unsigned char be_sid[3];

	/* Check to avoid double sessions */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list,
	    sess_list_entry) {
		if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
			    "Double sess %p found (s_id %x:%x:%x, "
			    "loop_id %d), updating to d_id %x:%x:%x, "
			    "loop_id %d", sess, sess->s_id.b.domain,
			    sess->s_id.b.al_pa, sess->s_id.b.area,
			    sess->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.al_pa, fcport->d_id.b.area,
			    fcport->loop_id);

			if (sess->deleted)
				qlt_undelete_sess(sess);

			kref_get(&sess->se_sess->sess_kref);
			ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
			    (fcport->flags & FCF_CONF_COMP_SUPPORTED));

			if (sess->local && !local)
				sess->local = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

			return sess;
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
		    "qla_target(%u): session allocation failed, all commands "
		    "from port %8phC will be refused", vha->vp_idx,
		    fcport->port_name);

		return NULL;
	}
	sess->tgt = vha->vha_tgt.qla_tgt;
	sess->vha = vha;
	sess->s_id = fcport->d_id;
	sess->loop_id = fcport->loop_id;
	sess->local = local;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
	    sess, vha->vha_tgt.qla_tgt);

	be_sid[0] = sess->s_id.b.domain;
	be_sid[1] = sess->s_id.b.area;
	be_sid[2] = sess->s_id.b.al_pa;
	/*
	 * Determine if this fc_port->port_name is allowed to access
	 * target mode using explicit NodeACLs+MappedLUNs, or using
	 * TPG demo mode.  If this is successful a target mode FC nexus
	 * is created.
	 */
	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
		kfree(sess);
		return NULL;
	}
	/*
	 * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
	 * access across ->hardware_lock reacquire.
	 */
	kref_get(&sess->se_sess->sess_kref);

	sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED);
	BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
	memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));

	spin_lock_irqsave(&ha->hardware_lock, flags);
	list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
	vha->vha_tgt.qla_tgt->sess_count++;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
	    "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
	    "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
	    vha->vp_idx, local ? "local " : "", fcport->port_name,
	    fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area,
	    sess->s_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");

	return sess;
}

/*
 * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
 */
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_sess *sess;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	if (!tgt || (fcport->port_type != FCT_INITIATOR))
		return;

	if (qla_ini_mode_enabled(vha))
		return;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return;
	}
	sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_create_sess(vha, fcport, false);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->hardware_lock, flags);
	} else {
		kref_get(&sess->se_sess->sess_kref);

		if (sess->deleted) {
			qlt_undelete_sess(sess);

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
			    "qla_target(%u): %ssession for port %8phC "
			    "(loop ID %d) reappeared\n", vha->vp_idx,
			    sess->local ? "local " : "", sess->port_name,
			    sess->loop_id);

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
			    "Reappeared sess %p\n", sess);
		}
		ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
		    (fcport->flags & FCF_CONF_COMP_SUPPORTED));
	}

	if (sess && sess->local) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
		    "qla_target(%u): local session for "
		    "port %8phC (loop ID %d) became global\n", vha->vp_idx,
		    fcport->port_name, sess->loop_id);
		sess->local = 0;
	}
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_sess *sess;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	if (!tgt || (fcport->port_type != FCT_INITIATOR))
		return;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return;
	}
	sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);

	sess->local = 1;
	qlt_schedule_sess_for_deletion(sess, false);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static inline int test_tgt_sess_count(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;
	int res;
	/*
	 * We need to protect against race, when tgt is freed before or
	 * inside wake_up()
	 */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
	    "tgt %p, empty(sess_list)=%d sess_count=%d\n",
	    tgt, list_empty(&tgt->sess_list), tgt->sess_count);
	res = (tgt->sess_count == 0);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}

/* Called by tcm_qla2xxx configfs code */
int qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	mutex_lock(&qla_tgt_mutex);
	if (!vha->fc_vport) {
		struct Scsi_Host *sh = vha->host;
		struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
		bool npiv_vports;

		spin_lock_irqsave(sh->host_lock, flags);
		npiv_vports = (fc_host->npiv_vports_inuse);
		spin_unlock_irqrestore(sh->host_lock, flags);

		if (npiv_vports) {
			mutex_unlock(&qla_tgt_mutex);
			return -EPERM;
		}
	}
	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		mutex_unlock(&qla_tgt_mutex);
		return -EPERM;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we still can get an incoming packet.
	 */
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&qla_tgt_mutex);

	flush_delayed_work(&tgt->sess_del_work);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_scheduled_work();
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: list_empty(sess_list)=%d "
	    "sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
	    tgt->sess_count);

	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));

	/* Big hammer */
	if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
	return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);

/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	unsigned long flags;

	if (tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
		    "Already in tgt->tgt_stopped state\n");
		dump_stack();
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
	    "Waiting for %d IRQ commands to complete (tgt %p)",
	    tgt->irq_cmd_count, tgt);

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (tgt->irq_cmd_count != 0) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		udelay(2);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished",
	    tgt);
}
EXPORT_SYMBOL(qlt_stop_phase2);

/* Called from qlt_remove_target() -> qla2x00_remove_one() */
static void qlt_release(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;

	if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
		qlt_stop_phase2(tgt);

	vha->vha_tgt.qla_tgt = NULL;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
	    "Release of tgt %p finished\n", tgt);

	kfree(tgt);
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
	const void *param, unsigned int param_size)
{
	struct qla_tgt_sess_work_param *prm;
	unsigned long flags;

	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
	if (!prm) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
		    "qla_target(%d): Unable to create session "
		    "work, command will be refused", 0);
		return -ENOMEM;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
	    "Scheduling work (type %d, prm %p)"
	    " to find session for param %p (size %d, tgt %p)\n",
	    type, prm, param, param_size, tgt);

	prm->type = type;
	memcpy(&prm->tm_iocb, param, param_size);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	schedule_work(&tgt->sess_work);

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_send_notify_ack(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct nack_to_isp *nack;

	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe049,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (vha->vha_tgt.qla_tgt != NULL)
		vha->vha_tgt.qla_tgt->notify_ack_expected++;

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			__constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
	nack->u.isp24.srr_reject_code = srr_reject_code;
	nack->u.isp24.srr_reject_code_expl = srr_explan;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	ql_dbg(ql_dbg_tgt, vha, 0xe005,
	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
	    vha->vp_idx, nack->u.isp24.status);

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, uint32_t status,
	bool ids_reversed)
{
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	uint32_t f_ctl;
	uint8_t *p;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
	    ha, abts, status);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
	if (!resp) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;
	if (ids_reversed) {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
	} else {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
	}
	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (status == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
	struct abts_resp_from_24xx_fw *entry)
{
	struct ctio7_to_24xx *ctio;

	ql_dbg(ql_dbg_tgt, vha, 0xe007,
	    "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	/*
	 * What we have here is the firmware's response to the ABTS response
	 * that we generated, so the ID fields in it are reversed.
	 */

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->nport_handle = entry->nport_handle;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
	ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
	ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
	ctio->exchange_addr = entry->exchange_addr_to_abort;
	ctio->u.status1.flags =
	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_TERMINATE);
	ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);

	qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
	    FCP_TMF_CMPL, true);
}

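/*
 * Look up the command being aborted by its exchange tag on the session's
 * sess_cmd_list, then hand the abort to the fabric module as a
 * TMR_ABORT_TASK via tgt_ops->handle_tmr().
 */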
/* ha->hardware_lock supposed to be held on entry */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_session *se_sess = sess->se_sess;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct se_cmd *se_cmd;
	u32 lun = 0;
	int rc;
	bool found_lun = false;

	spin_lock(&se_sess->sess_cmd_lock);
	list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
		struct qla_tgt_cmd *cmd =
			container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
		if (cmd->tag == abts->exchange_addr_to_abort) {
			lun = cmd->unpacked_lun;
			found_lun = true;
			break;
		}
	}
	spin_unlock(&se_sess->sess_cmd_lock);

	if (!found_lun)
		return -ENOENT;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
	    "qla_target(%d): task abort (tag=%d)\n",
	    vha->vp_idx, abts->exchange_addr_to_abort);

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
	mcmd->reset_count = vha->hw->chip_reset;

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
	    abts->exchange_addr_to_abort);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
		    "qla_target(%d): tgt_ops->handle_tmr()"
		    " failed: %d", vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	uint32_t tag = abts->exchange_addr_to_abort;
	uint8_t s_id[3];
	int rc;

	if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
		    "qla_target(%d): ABTS: Abort Sequence not "
		    "supported\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
		    "qla_target(%d): ABTS: Unknown Exchange "
		    "Address received\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
	    "qla_target(%d): task abort (s_id=%x:%x:%x, "
	    "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
	    abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
	    le32_to_cpu(abts->fcp_hdr_le.parameter));

	s_id[0] = abts->fcp_hdr_le.s_id[2];
	s_id[1] = abts->fcp_hdr_le.s_id[1];
	s_id[2] = abts->fcp_hdr_le.s_id[0];

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
		    "qla_target(%d): task abort for non-existant session\n",
		    vha->vp_idx);
		rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
		if (rc != 0) {
			qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
			    false);
		}
		return;
	}

	rc = __qlt_24xx_handle_abts(vha, abts, sess);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
		    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
		    vha->vp_idx, rc);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
	struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
{
	struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
	struct ctio7_to_24xx *ctio;
	uint16_t temp;

	ql_dbg(ql_dbg_tgt, ha, 0xe008,
	    "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
	    ha, atio, resp_code);

	/* Send marker if required */
	if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, ha, 0xe04c,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", ha->vp_idx, __func__);
		return;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->nport_handle = mcmd->sess->loop_id;
	ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = ha->vp_idx;
	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_SEND_STATUS);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.ox_id = cpu_to_le16(temp);
	ctio->u.status1.scsi_status =
	    __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
	ctio->u.status1.response_len = __constant_cpu_to_le16(8);
	ctio->u.status1.sense_data[0] = resp_code;

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(ha, ha->req);
}

void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
}
EXPORT_SYMBOL(qlt_free_mcmd);

/* callback from target fabric module code */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
	    "TM response mcmd (%p) status %#x state %#x",
	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (qla2x00_reset_active(vha) || mcmd->reset_count != ha->chip_reset) {
		/*
		 * Either a chip reset is active or this request was from
		 * previous life, just abort the processing.
		 */
		ql_dbg(ql_dbg_async, vha, 0xe100,
			"RESET-TMR active/old-count/new-count = %d/%d/%d.\n",
			qla2x00_reset_active(vha), mcmd->reset_count,
			ha->chip_reset);
		ha->tgt.tgt_ops->free_mcmd(mcmd);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return;
	}

	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
		qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
		    0, 0, 0, 0, 0, 0);
	else {
		if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
			qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
			    mcmd->fc_tm_rsp, false);
		else
			qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
			    mcmd->fc_tm_rsp);
	}
	/*
	 * Make the callback for ->free_mcmd() to queue_work() and invoke
	 * target_put_sess_cmd() to drop cmd_kref to 1. The final
	 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
	 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
	 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
	 * qlt_xmit_tm_rsp() returns here..
	 */
	ha->tgt.tgt_ops->free_mcmd(mcmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);

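/*
 * Map the command's data (and, for DIF operations, protection) scatterlists
 * for DMA and work out how many data segments / request-queue entries the
 * CTIO will need.
 */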
/* No locks */
static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd = prm->cmd;

	BUG_ON(cmd->sg_cnt == 0);

	prm->sg = (struct scatterlist *)cmd->sg;
	prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
	    cmd->sg_cnt, cmd->dma_data_direction);
	if (unlikely(prm->seg_cnt == 0))
		goto out_err;

	prm->cmd->sg_mapped = 1;

	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
		/*
		 * If greater than four sg entries then we need to allocate
		 * the continuation entries
		 */
		if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
			prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
			    prm->tgt->datasegs_per_cmd,
			    prm->tgt->datasegs_per_cont);
	} else {
		/* DIF */
		if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
		    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
			prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
			prm->tot_dsds = prm->seg_cnt;
		} else
			prm->tot_dsds = prm->seg_cnt;

		if (cmd->prot_sg_cnt) {
			prm->prot_sg = cmd->prot_sg;
			prm->prot_seg_cnt = pci_map_sg(prm->tgt->ha->pdev,
			    cmd->prot_sg, cmd->prot_sg_cnt,
			    cmd->dma_data_direction);
			if (unlikely(prm->prot_seg_cnt == 0))
				goto out_err;

			if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
			    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
				/* DIF bundling is not supported here */
				prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
				    cmd->blk_sz);
				prm->tot_dsds += prm->prot_seg_cnt;
			} else
				prm->tot_dsds += prm->prot_seg_cnt;
		}
	}

	return 0;

out_err:
	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
	    "qla_target(%d): PCI mapping failed: sg_cnt=%d",
	    0, prm->cmd->sg_cnt);
	return -1;
}

static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
{
	struct qla_hw_data *ha = vha->hw;

	if (!cmd->sg_mapped)
		return;

	pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
	cmd->sg_mapped = 0;

	if (cmd->prot_sg_cnt)
		pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
		    cmd->dma_data_direction);

	if (cmd->ctx_dsd_alloced)
		qla2x00_clean_dsd_pool(ha, NULL, cmd);

	if (cmd->ctx)
		dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
}

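/*
 * Reserve req_cnt (+2 for safety) entries on the request ring, refreshing the
 * cached free count from the hardware out-pointer first; returns -EAGAIN if
 * the ring is too full.
 */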
static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
	uint32_t req_cnt)
{
	uint32_t cnt, cnt_in;

	if (vha->req->cnt < (req_cnt + 2)) {
		cnt = (uint16_t)RD_REG_DWORD(vha->req->req_q_out);
		cnt_in = (uint16_t)RD_REG_DWORD(vha->req->req_q_in);

		if (vha->req->ring_index < cnt)
			vha->req->cnt = cnt - vha->req->ring_index;
		else
			vha->req->cnt = vha->req->length -
			    (vha->req->ring_index - cnt);
	}

	if (unlikely(vha->req->cnt < (req_cnt + 2))) {
		ql_dbg(ql_dbg_io, vha, 0x305a,
		    "qla_target(%d): There is no room in the request ring: vha->req->ring_index=%d, vha->req->cnt=%d, req_cnt=%d Req-out=%d Req-in=%d Req-Length=%d\n",
		    vha->vp_idx, vha->req->ring_index,
		    vha->req->cnt, req_cnt, cnt, cnt_in, vha->req->length);
		return -EAGAIN;
	}
	vha->req->cnt -= req_cnt;

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha)
{
	/* Adjust ring index. */
	vha->req->ring_index++;
	if (vha->req->ring_index == vha->req->length) {
		vha->req->ring_index = 0;
		vha->req->ring_ptr = vha->req->ring;
	} else {
		vha->req->ring_ptr++;
	}
	return (cont_entry_t *)vha->req->ring_ptr;
}

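/*
 * Allocate a free command handle (1..DEFAULT_OUTSTANDING_COMMANDS) used to
 * match a CTIO completion back to its qla_tgt_cmd in ha->tgt.cmds[];
 * returns QLA_TGT_NULL_HANDLE when all slots are in use.
 */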
/* ha->hardware_lock supposed to be held on entry */
static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t h;

	h = ha->tgt.current_handle;
	/* always increment cmd handle */
	do {
		++h;
		if (h > DEFAULT_OUTSTANDING_COMMANDS)
			h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
		if (h == ha->tgt.current_handle) {
			ql_dbg(ql_dbg_io, vha, 0x305b,
			    "qla_target(%d): Ran out of "
			    "empty cmd slots in ha %p\n", vha->vp_idx, ha);
			h = QLA_TGT_NULL_HANDLE;
			break;
		}
	} while ((h == QLA_TGT_NULL_HANDLE) ||
	    (h == QLA_TGT_SKIP_HANDLE) ||
	    (ha->tgt.cmds[h-1] != NULL));

	if (h != QLA_TGT_NULL_HANDLE)
		ha->tgt.current_handle = h;

	return h;
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	uint32_t h;
	struct ctio7_to_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *atio = &prm->cmd->atio;
	uint16_t temp;

	pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	pkt->entry_type = CTIO_TYPE7;
	pkt->entry_count = (uint8_t)prm->req_cnt;
	pkt->vp_index = vha->vp_idx;

	h = qlt_make_handle(vha);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, therefore, the command.
		 */
		return -EAGAIN;
	} else
		ha->tgt.cmds[h-1] = prm->cmd;

	pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = prm->cmd->loop_id;
	pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	pkt->exchange_addr = atio->u.isp24.exchange_addr;
	pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	pkt->u.status0.ox_id = cpu_to_le16(temp);
	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is a sufficient number of request entries to not drop it.
 */
static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;

	/* Build continuation packets */
	while (prm->seg_cnt > 0) {
		cont_a64_entry_t *cont_pkt64 =
			(cont_a64_entry_t *)qlt_get_req_pkt(vha);

		/*
		 * Make sure that from cont_pkt64 none of the 64-bit specific
		 * fields are used for 32-bit addressing. Cast to
		 * (cont_entry_t *) for that.
		 */

		memset(cont_pkt64, 0, sizeof(*cont_pkt64));

		cont_pkt64->entry_count = 1;
		cont_pkt64->sys_define = 0;

		if (enable_64bit_addressing) {
			cont_pkt64->entry_type = CONTINUE_A64_TYPE;
			dword_ptr =
			    (uint32_t *)&cont_pkt64->dseg_0_address;
		} else {
			cont_pkt64->entry_type = CONTINUE_TYPE;
			dword_ptr =
			    (uint32_t *)&((cont_entry_t *)
				cont_pkt64)->dseg_0_address;
		}

		/* Load continuation entry data segments */
		for (cnt = 0;
		    cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
		    cnt++, prm->seg_cnt--) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_lo32
				(sg_dma_address(prm->sg)));
			if (enable_64bit_addressing) {
				*dword_ptr++ =
				    cpu_to_le32(pci_dma_hi32
					(sg_dma_address
					(prm->sg)));
			}
			*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

			prm->sg = sg_next(prm->sg);
		}
	}
}

/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is a sufficient number of request entries to not drop it.
 */
static void qlt_load_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
	struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;

	pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);

	/* Setup packet address segment pointer */
	dword_ptr = pkt24->u.status0.dseg_0_address;

	/* Set total data segment count */
	if (prm->seg_cnt)
		pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);

	if (prm->seg_cnt == 0) {
		/* No data transfer */
		*dword_ptr++ = 0;
		*dword_ptr = 0;
		return;
	}

	/* If scatter gather */

	/* Load command entry data segments */
	for (cnt = 0;
	    (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
	    cnt++, prm->seg_cnt--) {
		*dword_ptr++ =
		    cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
		if (enable_64bit_addressing) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_hi32(
				sg_dma_address(prm->sg)));
		}
		*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

		prm->sg = sg_next(prm->sg);
	}

	qlt_load_cont_data_segments(prm, vha);
}

static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
{
	return cmd->bufflen > 0;
}

1716
1717/*
1718 * Called without ha->hardware_lock held
1719 */
1720static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
1721 struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
1722 uint32_t *full_req_cnt)
1723{
1724 struct qla_tgt *tgt = cmd->tgt;
1725 struct scsi_qla_host *vha = tgt->vha;
1726 struct qla_hw_data *ha = vha->hw;
1727 struct se_cmd *se_cmd = &cmd->se_cmd;
1728
1729 if (unlikely(cmd->aborted)) {
1730 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
1731 "qla_target(%d): terminating exchange "
1732 "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha->vp_idx, cmd,
1733 se_cmd, cmd->tag);
1734
1735 cmd->state = QLA_TGT_STATE_ABORTED;
e07f8f65 1736 cmd->cmd_flags |= BIT_6;
2d70c103
NB
1737
1738 qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
1739
1740 /* !! At this point cmd could be already freed !! */
1741 return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
1742 }
1743
2d70c103
NB
1744 prm->cmd = cmd;
1745 prm->tgt = tgt;
1746 prm->rq_result = scsi_status;
1747 prm->sense_buffer = &cmd->sense_buffer[0];
1748 prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
1749 prm->sg = NULL;
1750 prm->seg_cnt = -1;
1751 prm->req_cnt = 1;
1752 prm->add_status_pkt = 0;
1753
1754 /* Send marker if required */
1755 if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
1756 return -EFAULT;
1757
1758 if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
1759 if (qlt_pci_map_calc_cnt(prm) != 0)
1760 return -EAGAIN;
1761 }
1762
1763 *full_req_cnt = prm->req_cnt;
1764
1765 if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
1766 prm->residual = se_cmd->residual_count;
667024a3 1767 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x305c,
1768 "Residual underflow: %d (tag %d, "
1769 "op %x, bufflen %d, rq_result %x)\n", prm->residual,
1770 cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
1771 cmd->bufflen, prm->rq_result);
1772 prm->rq_result |= SS_RESIDUAL_UNDER;
1773 } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
1774 prm->residual = se_cmd->residual_count;
667024a3 1775 ql_dbg(ql_dbg_io, vha, 0x305d,
1776 "Residual overflow: %d (tag %d, "
1777 "op %x, bufflen %d, rq_result %x)\n", prm->residual,
1778 cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
1779 cmd->bufflen, prm->rq_result);
1780 prm->rq_result |= SS_RESIDUAL_OVER;
1781 }
1782
1783 if (xmit_type & QLA_TGT_XMIT_STATUS) {
1784 /*
1785 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
1786 * ignored in *xmit_response() below
1787 */
1788 if (qlt_has_data(cmd)) {
1789 if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
1790 (IS_FWI2_CAPABLE(ha) &&
1791 (prm->rq_result != 0))) {
1792 prm->add_status_pkt = 1;
1793 (*full_req_cnt)++;
1794 }
1795 }
1796 }
1797
1798 return 0;
1799}
1800
1801static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
1802 struct qla_tgt_cmd *cmd, int sending_sense)
1803{
1804 if (ha->tgt.enable_class_2)
1805 return 0;
1806
1807 if (sending_sense)
1808 return cmd->conf_compl_supported;
1809 else
1810 return ha->tgt.enable_explicit_conf &&
1811 cmd->conf_compl_supported;
1812}
1813
1814#ifdef CONFIG_QLA_TGT_DEBUG_SRR
1815/*
1816 * Original taken from the XFS code
1817 */
1818static unsigned long qlt_srr_random(void)
1819{
1820 static int Inited;
1821 static unsigned long RandomValue;
1822 static DEFINE_SPINLOCK(lock);
1823 /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
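	/* (This is the Park-Miller "minimal standard" generator,
	 * rv = 16807 * rv mod (2^31 - 1), computed with Schrage's
	 * decomposition q = 127773, r = 2836 so the intermediate
	 * products below stay within 32 bits.)
	 */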
1824 register long rv;
1825 register long lo;
1826 register long hi;
1827 unsigned long flags;
1828
1829 spin_lock_irqsave(&lock, flags);
1830 if (!Inited) {
1831 RandomValue = jiffies;
1832 Inited = 1;
1833 }
1834 rv = RandomValue;
1835 hi = rv / 127773;
1836 lo = rv % 127773;
1837 rv = 16807 * lo - 2836 * hi;
1838 if (rv <= 0)
1839 rv += 2147483647;
1840 RandomValue = rv;
1841 spin_unlock_irqrestore(&lock, flags);
1842 return rv;
1843}
1844
1845static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
1846{
 1847#if 0 /* This is not a real lost status packet, so it won't lead to SRR */
1848 if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200)
1849 == 50) {
1850 *xmit_type &= ~QLA_TGT_XMIT_STATUS;
1851 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015,
1852 "Dropping cmd %p (tag %d) status", cmd, cmd->tag);
1853 }
1854#endif
1855 /*
1856 * It's currently not possible to simulate SRRs for FCP_WRITE without
1857 * a physical link layer failure, so don't even try here..
1858 */
1859 if (cmd->dma_data_direction != DMA_FROM_DEVICE)
1860 return;
1861
1862 if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) &&
1863 ((qlt_srr_random() % 100) == 20)) {
1864 int i, leave = 0;
1865 unsigned int tot_len = 0;
1866
1867 while (leave == 0)
1868 leave = qlt_srr_random() % cmd->sg_cnt;
1869
1870 for (i = 0; i < leave; i++)
1871 tot_len += cmd->sg[i].length;
1872
1873 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016,
1874 "Cutting cmd %p (tag %d) buffer"
1875 " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
1876 " cmd->sg_cnt %d)", cmd, cmd->tag, tot_len, leave,
1877 cmd->bufflen, cmd->sg_cnt);
1878
1879 cmd->bufflen = tot_len;
1880 cmd->sg_cnt = leave;
1881 }
1882
1883 if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) {
1884 unsigned int offset = qlt_srr_random() % cmd->bufflen;
1885
1886 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017,
1887 "Cutting cmd %p (tag %d) buffer head "
1888 "to offset %d (cmd->bufflen %d)", cmd, cmd->tag, offset,
1889 cmd->bufflen);
1890 if (offset == 0)
1891 *xmit_type &= ~QLA_TGT_XMIT_DATA;
1892 else if (qlt_set_data_offset(cmd, offset)) {
1893 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018,
1894 "qlt_set_data_offset() failed (tag %d)", cmd->tag);
1895 }
1896 }
1897}
1898#else
1899static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
1900{}
1901#endif
1902
1903static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
1904 struct qla_tgt_prm *prm)
1905{
1906 prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
1907 (uint32_t)sizeof(ctio->u.status1.sense_data));
1908 ctio->u.status0.flags |=
1909 __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
1910 if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
1911 ctio->u.status0.flags |= __constant_cpu_to_le16(
1912 CTIO7_FLAGS_EXPLICIT_CONFORM |
1913 CTIO7_FLAGS_CONFORM_REQ);
1914 }
1915 ctio->u.status0.residual = cpu_to_le32(prm->residual);
1916 ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
1917 if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
1918 int i;
1919
1920 if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
1921 if (prm->cmd->se_cmd.scsi_status != 0) {
1922 ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
1923 "Skipping EXPLICIT_CONFORM and "
1924 "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
1925 "non GOOD status\n");
1926 goto skip_explict_conf;
1927 }
1928 ctio->u.status1.flags |= __constant_cpu_to_le16(
1929 CTIO7_FLAGS_EXPLICIT_CONFORM |
1930 CTIO7_FLAGS_CONFORM_REQ);
1931 }
1932skip_explict_conf:
1933 ctio->u.status1.flags &=
1934 ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
1935 ctio->u.status1.flags |=
1936 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
1937 ctio->u.status1.scsi_status |=
1938 __constant_cpu_to_le16(SS_SENSE_LEN_VALID);
1939 ctio->u.status1.sense_length =
1940 cpu_to_le16(prm->sense_buffer_len);
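		/* Copy the sense data into the CTIO as big-endian 32-bit
		 * words; trailing bytes beyond a multiple of 4 are not
		 * copied (see the #if 0 diagnostic below).
		 */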
1941 for (i = 0; i < prm->sense_buffer_len/4; i++)
1942 ((uint32_t *)ctio->u.status1.sense_data)[i] =
1943 cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
1944#if 0
1945 if (unlikely((prm->sense_buffer_len % 4) != 0)) {
1946 static int q;
1947 if (q < 10) {
1948 ql_dbg(ql_dbg_tgt, vha, 0xe04f,
1949 "qla_target(%d): %d bytes of sense "
1950 "lost", prm->tgt->ha->vp_idx,
1951 prm->sense_buffer_len % 4);
1952 q++;
1953 }
1954 }
1955#endif
1956 } else {
1957 ctio->u.status1.flags &=
1958 ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
1959 ctio->u.status1.flags |=
1960 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
1961 ctio->u.status1.sense_length = 0;
1962 memset(ctio->u.status1.sense_data, 0,
1963 sizeof(ctio->u.status1.sense_data));
1964 }
1965
1966 /* Sense with len > 24, is it possible ??? */
1967}
1968
1969
1970
1971/* diff */
1972static inline int
1973qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
1974{
1975 /*
1976 * Uncomment when corresponding SCSI changes are done.
1977 *
1978 if (!sp->cmd->prot_chk)
1979 return 0;
1980 *
1981 */
1982 switch (se_cmd->prot_op) {
1983 case TARGET_PROT_DOUT_INSERT:
1984 case TARGET_PROT_DIN_STRIP:
1985 if (ql2xenablehba_err_chk >= 1)
1986 return 1;
1987 break;
1988 case TARGET_PROT_DOUT_PASS:
1989 case TARGET_PROT_DIN_PASS:
1990 if (ql2xenablehba_err_chk >= 2)
1991 return 1;
1992 break;
1993 case TARGET_PROT_DIN_INSERT:
1994 case TARGET_PROT_DOUT_STRIP:
1995 return 1;
1996 default:
1997 break;
1998 }
1999 return 0;
2000}
2001
2002/*
 2003 * qlt_set_t10dif_tags - Extract Ref and App tags from the SCSI command
2004 *
2005 */
2006static inline void
2007qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx)
2008{
2009 uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
2010
 2011	/* Wait until the Mode Sense/Select cmd, modepage Ah, subpage 2
 2012	 * have been implemented by TCM, before the AppTag is available.
2013 * Look for modesense_handlers[]
2014 */
c7ee3bd4 2015 ctx->app_tag = 0;
2016 ctx->app_tag_mask[0] = 0x0;
2017 ctx->app_tag_mask[1] = 0x0;
2018
2019 switch (se_cmd->prot_type) {
2020 case TARGET_DIF_TYPE0_PROT:
2021 /*
2022 * No check for ql2xenablehba_err_chk, as it would be an
2023 * I/O error if hba tag generation is not done.
2024 */
2025 ctx->ref_tag = cpu_to_le32(lba);
2026
2027 if (!qlt_hba_err_chk_enabled(se_cmd))
2028 break;
2029
2030 /* enable ALL bytes of the ref tag */
2031 ctx->ref_tag_mask[0] = 0xff;
2032 ctx->ref_tag_mask[1] = 0xff;
2033 ctx->ref_tag_mask[2] = 0xff;
2034 ctx->ref_tag_mask[3] = 0xff;
2035 break;
2036 /*
 2037	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
2038 * 16 bit app tag.
2039 */
2040 case TARGET_DIF_TYPE1_PROT:
2041 ctx->ref_tag = cpu_to_le32(lba);
2042
2043 if (!qlt_hba_err_chk_enabled(se_cmd))
2044 break;
2045
2046 /* enable ALL bytes of the ref tag */
2047 ctx->ref_tag_mask[0] = 0xff;
2048 ctx->ref_tag_mask[1] = 0xff;
2049 ctx->ref_tag_mask[2] = 0xff;
2050 ctx->ref_tag_mask[3] = 0xff;
2051 break;
2052 /*
2053 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
2054 * match LBA in CDB + N
2055 */
2056 case TARGET_DIF_TYPE2_PROT:
2057 ctx->ref_tag = cpu_to_le32(lba);
2058
2059 if (!qlt_hba_err_chk_enabled(se_cmd))
2060 break;
2061
2062 /* enable ALL bytes of the ref tag */
2063 ctx->ref_tag_mask[0] = 0xff;
2064 ctx->ref_tag_mask[1] = 0xff;
2065 ctx->ref_tag_mask[2] = 0xff;
2066 ctx->ref_tag_mask[3] = 0xff;
2067 break;
2068
2069 /* For Type 3 protection: 16 bit GUARD only */
2070 case TARGET_DIF_TYPE3_PROT:
2071 ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
2072 ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
2073 break;
2074 }
2075}
2076
2077
2078static inline int
2079qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
2080{
2081 uint32_t *cur_dsd;
2082 int sgc;
2083 uint32_t transfer_length = 0;
2084 uint32_t data_bytes;
2085 uint32_t dif_bytes;
2086 uint8_t bundling = 1;
2087 uint8_t *clr_ptr;
2088 struct crc_context *crc_ctx_pkt = NULL;
2089 struct qla_hw_data *ha;
2090 struct ctio_crc2_to_fw *pkt;
2091 dma_addr_t crc_ctx_dma;
2092 uint16_t fw_prot_opts = 0;
2093 struct qla_tgt_cmd *cmd = prm->cmd;
2094 struct se_cmd *se_cmd = &cmd->se_cmd;
2095 uint32_t h;
2096 struct atio_from_isp *atio = &prm->cmd->atio;
c7ee3bd4 2097 uint16_t t16;
2098
2099 sgc = 0;
2100 ha = vha->hw;
2101
2102 pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr;
2103 prm->pkt = pkt;
2104 memset(pkt, 0, sizeof(*pkt));
2105
2106 ql_dbg(ql_dbg_tgt, vha, 0xe071,
2107 "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
2108 vha->vp_idx, __func__, se_cmd, se_cmd->prot_op,
2109 prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);
2110
2111 if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
2112 (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
2113 bundling = 0;
2114
 2115	/* Compute dif len and adjust data len to include protection */
2116 data_bytes = cmd->bufflen;
2117 dif_bytes = (data_bytes / cmd->blk_sz) * 8;
2118
2119 switch (se_cmd->prot_op) {
2120 case TARGET_PROT_DIN_INSERT:
2121 case TARGET_PROT_DOUT_STRIP:
2122 transfer_length = data_bytes;
2123 data_bytes += dif_bytes;
2124 break;
2125
2126 case TARGET_PROT_DIN_STRIP:
2127 case TARGET_PROT_DOUT_INSERT:
2128 case TARGET_PROT_DIN_PASS:
2129 case TARGET_PROT_DOUT_PASS:
2130 transfer_length = data_bytes + dif_bytes;
2131 break;
2132
2133 default:
2134 BUG();
2135 break;
2136 }
2137
2138 if (!qlt_hba_err_chk_enabled(se_cmd))
2139 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
2140 /* HBA error checking enabled */
2141 else if (IS_PI_UNINIT_CAPABLE(ha)) {
2142 if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
2143 (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
2144 fw_prot_opts |= PO_DIS_VALD_APP_ESC;
2145 else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
2146 fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
2147 }
2148
2149 switch (se_cmd->prot_op) {
2150 case TARGET_PROT_DIN_INSERT:
2151 case TARGET_PROT_DOUT_INSERT:
2152 fw_prot_opts |= PO_MODE_DIF_INSERT;
2153 break;
2154 case TARGET_PROT_DIN_STRIP:
2155 case TARGET_PROT_DOUT_STRIP:
2156 fw_prot_opts |= PO_MODE_DIF_REMOVE;
2157 break;
2158 case TARGET_PROT_DIN_PASS:
2159 case TARGET_PROT_DOUT_PASS:
2160 fw_prot_opts |= PO_MODE_DIF_PASS;
2161 /* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
2162 break;
2163 default:/* Normal Request */
2164 fw_prot_opts |= PO_MODE_DIF_PASS;
2165 break;
2166 }
2167
2168
2169 /* ---- PKT ---- */
2170 /* Update entry type to indicate Command Type CRC_2 IOCB */
2171 pkt->entry_type = CTIO_CRC2;
2172 pkt->entry_count = 1;
2173 pkt->vp_index = vha->vp_idx;
2174
2175 h = qlt_make_handle(vha);
2176 if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
2177 /*
2178 * CTIO type 7 from the firmware doesn't provide a way to
2179 * know the initiator's LOOP ID, hence we can't find
 2180		 * the session and thus the command.
2181 */
2182 return -EAGAIN;
2183 } else
2184 ha->tgt.cmds[h-1] = prm->cmd;
2185
2186
2187 pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
2188 pkt->nport_handle = prm->cmd->loop_id;
2189 pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
2190 pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
2191 pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
2192 pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
2193 pkt->exchange_addr = atio->u.isp24.exchange_addr;
2194
2195 /* silence compile warning */
2196 t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2197 pkt->ox_id = cpu_to_le16(t16);
2198
2199 t16 = (atio->u.isp24.attr << 9);
2200 pkt->flags |= cpu_to_le16(t16);
2201 pkt->relative_offset = cpu_to_le32(prm->cmd->offset);
2202
2203 /* Set transfer direction */
2204 if (cmd->dma_data_direction == DMA_TO_DEVICE)
2205 pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN);
2206 else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
2207 pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
2208
2209
2210 pkt->dseg_count = prm->tot_dsds;
2211 /* Fibre channel byte count */
2212 pkt->transfer_length = cpu_to_le32(transfer_length);
2213
2214
2215 /* ----- CRC context -------- */
2216
2217 /* Allocate CRC context from global pool */
2218 crc_ctx_pkt = cmd->ctx =
2219 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
2220
2221 if (!crc_ctx_pkt)
2222 goto crc_queuing_error;
2223
2224 /* Zero out CTX area. */
2225 clr_ptr = (uint8_t *)crc_ctx_pkt;
2226 memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
2227
2228 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
2229 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
2230
2231 /* Set handle */
2232 crc_ctx_pkt->handle = pkt->handle;
2233
2234 qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt);
2235
2236 pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
2237 pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
2238 pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
2239
2240
2241 if (!bundling) {
2242 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
2243 } else {
2244 /*
 2245		 * Configure bundling if we need to fetch the protection
 2246		 * data with interleaving PCI accesses
2247 */
2248 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
2249 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
2250 crc_ctx_pkt->u.bundling.dseg_count =
2251 cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
2252 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
2253 }
2254
2255 /* Finish the common fields of CRC pkt */
2256 crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz);
2257 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
2258 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
2259 crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
2260
2261
2262 /* Walks data segments */
2263 pkt->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
2264
2265 if (!bundling && prm->prot_seg_cnt) {
2266 if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
2267 prm->tot_dsds, cmd))
2268 goto crc_queuing_error;
2269 } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
2270 (prm->tot_dsds - prm->prot_seg_cnt), cmd))
2271 goto crc_queuing_error;
2272
2273 if (bundling && prm->prot_seg_cnt) {
2274 /* Walks dif segments */
c7ee3bd4 2275 pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;
2276
2277 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
2278 if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
2279 prm->prot_seg_cnt, cmd))
2280 goto crc_queuing_error;
2281 }
2282 return QLA_SUCCESS;
2283
2284crc_queuing_error:
2285 /* Cleanup will be performed by the caller */
2286
2287 return QLA_FUNCTION_FAILED;
2288}
2289
2290
2291/*
 2292 * Callback to set up a response of xmit_type QLA_TGT_XMIT_DATA and/or
2293 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
2294 */
2295int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
2296 uint8_t scsi_status)
2297{
2298 struct scsi_qla_host *vha = cmd->vha;
2299 struct qla_hw_data *ha = vha->hw;
2300 struct ctio7_to_24xx *pkt;
2301 struct qla_tgt_prm prm;
2302 uint32_t full_req_cnt = 0;
2303 unsigned long flags = 0;
2304 int res;
2305
2306 memset(&prm, 0, sizeof(prm));
2307 qlt_check_srr_debug(cmd, &xmit_type);
2308
2309 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
2310 "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n",
2311 (xmit_type & QLA_TGT_XMIT_STATUS) ?
2312 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
2313 &cmd->se_cmd);
2314
2315 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
2316 &full_req_cnt);
2317 if (unlikely(res != 0)) {
2318 if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
2319 return 0;
2320
2321 return res;
2322 }
2323
2324 spin_lock_irqsave(&ha->hardware_lock, flags);
2325
2326 if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
2327 /*
2328 * Either a chip reset is active or this request was from
2329 * previous life, just abort the processing.
2330 */
2331 cmd->state = QLA_TGT_STATE_PROCESSED;
2332 qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
2333 ql_dbg(ql_dbg_async, vha, 0xe101,
2334 "RESET-RSP active/old-count/new-count = %d/%d/%d.\n",
2335 qla2x00_reset_active(vha), cmd->reset_count,
2336 ha->chip_reset);
2337 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2338 return 0;
2339 }
2340
 2341	/* Does F/W have enough IOCBs for this request? */
2342 res = qlt_check_reserve_free_req(vha, full_req_cnt);
2343 if (unlikely(res))
2344 goto out_unmap_unlock;
2345
2346 if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
2347 res = qlt_build_ctio_crc2_pkt(&prm, vha);
2348 else
2349 res = qlt_24xx_build_ctio_pkt(&prm, vha);
2350 if (unlikely(res != 0))
2351 goto out_unmap_unlock;
2352
2353
2354 pkt = (struct ctio7_to_24xx *)prm.pkt;
2355
2356 if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
2357 pkt->u.status0.flags |=
2358 __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
2359 CTIO7_FLAGS_STATUS_MODE_0);
2360
2361 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
2362 qlt_load_data_segments(&prm, vha);
2363
2364 if (prm.add_status_pkt == 0) {
2365 if (xmit_type & QLA_TGT_XMIT_STATUS) {
2366 pkt->u.status0.scsi_status =
2367 cpu_to_le16(prm.rq_result);
2368 pkt->u.status0.residual =
2369 cpu_to_le32(prm.residual);
2370 pkt->u.status0.flags |= __constant_cpu_to_le16(
2371 CTIO7_FLAGS_SEND_STATUS);
2372 if (qlt_need_explicit_conf(ha, cmd, 0)) {
2373 pkt->u.status0.flags |=
2374 __constant_cpu_to_le16(
2375 CTIO7_FLAGS_EXPLICIT_CONFORM |
2376 CTIO7_FLAGS_CONFORM_REQ);
2377 }
2378 }
2379
2380 } else {
2381 /*
2382 * We have already made sure that there is sufficient
2383 * amount of request entries to not drop HW lock in
2384 * req_pkt().
2385 */
2386 struct ctio7_to_24xx *ctio =
2387 (struct ctio7_to_24xx *)qlt_get_req_pkt(vha);
2388
2389 ql_dbg(ql_dbg_io, vha, 0x305e,
2390 "Building additional status packet 0x%p.\n",
2391 ctio);
2393 /*
2394 * T10Dif: ctio_crc2_to_fw overlay ontop of
2395 * ctio7_to_24xx
2396 */
2d70c103 2397 memcpy(ctio, pkt, sizeof(*ctio));
f83adb61 2398 /* reset back to CTIO7 */
2d70c103 2399 ctio->entry_count = 1;
f83adb61 2400 ctio->entry_type = CTIO_TYPE7;
2401 ctio->dseg_count = 0;
2402 ctio->u.status1.flags &= ~__constant_cpu_to_le16(
2403 CTIO7_FLAGS_DATA_IN);
2404
2405 /* Real finish is ctio_m1's finish */
2406 pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
2407 pkt->u.status0.flags |= __constant_cpu_to_le16(
2408 CTIO7_FLAGS_DONT_RET_CTIO);
2409
2410 /* qlt_24xx_init_ctio_to_isp will correct
 2411			 * all necessary fields that are part of CTIO7.
 2412			 * There should be no residual CTIO-CRC2 data.
2413 */
2414 qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
2415 &prm);
2416 pr_debug("Status CTIO7: %p\n", ctio);
2417 }
2418 } else
2419 qlt_24xx_init_ctio_to_isp(pkt, &prm);
2420
2421
2422 cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
d564a372 2423 cmd->cmd_sent_to_fw = 1;
2425 /* Memory Barrier */
2426 wmb();
2427 qla2x00_start_iocbs(vha, vha->req);
2428 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2429
2430 return 0;
2431
2432out_unmap_unlock:
f9b6721a 2433 qlt_unmap_sg(vha, cmd);
2434 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2435
2436 return res;
2437}
2438EXPORT_SYMBOL(qlt_xmit_response);
2439
2440int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
2441{
2442 struct ctio7_to_24xx *pkt;
2443 struct scsi_qla_host *vha = cmd->vha;
2444 struct qla_hw_data *ha = vha->hw;
2445 struct qla_tgt *tgt = cmd->tgt;
2446 struct qla_tgt_prm prm;
2447 unsigned long flags;
2448 int res = 0;
2449
2450 memset(&prm, 0, sizeof(prm));
2451 prm.cmd = cmd;
2452 prm.tgt = tgt;
2453 prm.sg = NULL;
2454 prm.req_cnt = 1;
2455
2456 /* Send marker if required */
2457 if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
2458 return -EIO;
2459
2460 /* Calculate number of entries and segments required */
2461 if (qlt_pci_map_calc_cnt(&prm) != 0)
2462 return -EAGAIN;
2463
2464 spin_lock_irqsave(&ha->hardware_lock, flags);
2465
2466 if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
2467 /*
2468 * Either a chip reset is active or this request was from
2469 * previous life, just abort the processing.
2470 */
2471 cmd->state = QLA_TGT_STATE_NEED_DATA;
2472 qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
2473 ql_dbg(ql_dbg_async, vha, 0xe102,
2474 "RESET-XFR active/old-count/new-count = %d/%d/%d.\n",
2475 qla2x00_reset_active(vha), cmd->reset_count,
2476 ha->chip_reset);
2477 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2478 return 0;
2479 }
2480
 2481	/* Does F/W have enough IOCBs for this request? */
2482 res = qlt_check_reserve_free_req(vha, prm.req_cnt);
2483 if (res != 0)
2484 goto out_unlock_free_unmap;
2485 if (cmd->se_cmd.prot_op)
2486 res = qlt_build_ctio_crc2_pkt(&prm, vha);
2487 else
2488 res = qlt_24xx_build_ctio_pkt(&prm, vha);
2490 if (unlikely(res != 0))
2491 goto out_unlock_free_unmap;
2492 pkt = (struct ctio7_to_24xx *)prm.pkt;
2493 pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
2494 CTIO7_FLAGS_STATUS_MODE_0);
2495
2496 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
2497 qlt_load_data_segments(&prm, vha);
2498
2499 cmd->state = QLA_TGT_STATE_NEED_DATA;
d564a372 2500 cmd->cmd_sent_to_fw = 1;
2502 /* Memory Barrier */
2503 wmb();
2504 qla2x00_start_iocbs(vha, vha->req);
2505 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2506
2507 return res;
2508
2509out_unlock_free_unmap:
f9b6721a 2510 qlt_unmap_sg(vha, cmd);
2511 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2512
2513 return res;
2514}
2515EXPORT_SYMBOL(qlt_rdy_to_xfer);
2516
2517
2518/*
2519 * Checks the guard or meta-data for the type of error
2520 * detected by the HBA.
2521 */
2522static inline int
2523qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
2524 struct ctio_crc_from_fw *sts)
2525{
2526 uint8_t *ap = &sts->actual_dif[0];
2527 uint8_t *ep = &sts->expected_dif[0];
2528 uint32_t e_ref_tag, a_ref_tag;
2529 uint16_t e_app_tag, a_app_tag;
2530 uint16_t e_guard, a_guard;
2531 uint64_t lba = cmd->se_cmd.t_task_lba;
2532
2533 a_guard = be16_to_cpu(*(uint16_t *)(ap + 0));
2534 a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
2535 a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
2536
2537 e_guard = be16_to_cpu(*(uint16_t *)(ep + 0));
2538 e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
2539 e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
2540
2541 ql_dbg(ql_dbg_tgt, vha, 0xe075,
2542 "iocb(s) %p Returned STATUS.\n", sts);
2543
2544 ql_dbg(ql_dbg_tgt, vha, 0xf075,
fc385045 2545 "dif check TGT cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n",
2546 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
2547 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard);
2548
2549 /*
2550 * Ignore sector if:
2551 * For type 3: ref & app tag is all 'f's
2552 * For type 0,1,2: app tag is all 'f's
2553 */
2554 if ((a_app_tag == 0xffff) &&
2555 ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) ||
2556 (a_ref_tag == 0xffffffff))) {
2557 uint32_t blocks_done;
2558
2559 /* 2TB boundary case covered automatically with this */
2560 blocks_done = e_ref_tag - (uint32_t)lba + 1;
2561 cmd->se_cmd.bad_sector = e_ref_tag;
2562 cmd->se_cmd.pi_err = 0;
2563 ql_dbg(ql_dbg_tgt, vha, 0xf074,
2564 "need to return scsi good\n");
2565
2566 /* Update protection tag */
2567 if (cmd->prot_sg_cnt) {
2568 uint32_t i, j = 0, k = 0, num_ent;
2569 struct scatterlist *sg, *sgl;
2570
2571
2572 sgl = cmd->prot_sg;
2573
2574 /* Patch the corresponding protection tags */
2575 for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) {
2576 num_ent = sg_dma_len(sg) / 8;
2577 if (k + num_ent < blocks_done) {
2578 k += num_ent;
2579 continue;
2580 }
2581 j = blocks_done - k - 1;
2582 k = blocks_done;
2583 break;
2584 }
2585
2586 if (k != blocks_done) {
2587 ql_log(ql_log_warn, vha, 0xf076,
2588 "unexpected tag values tag:lba=%u:%llu)\n",
2589 e_ref_tag, (unsigned long long)lba);
2590 goto out;
2591 }
2592
2593#if 0
2594 struct sd_dif_tuple *spt;
2595 /* TODO:
2596 * This section came from initiator. Is it valid here?
2597 * should ulp be override with actual val???
2598 */
2599 spt = page_address(sg_page(sg)) + sg->offset;
2600 spt += j;
2601
2602 spt->app_tag = 0xffff;
2603 if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3)
2604 spt->ref_tag = 0xffffffff;
2605#endif
2606 }
2607
2608 return 0;
2609 }
2610
2611 /* check guard */
2612 if (e_guard != a_guard) {
2613 cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
2614 cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
2615
2616 ql_log(ql_log_warn, vha, 0xe076,
2617 "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
2618 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
2619 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
2620 a_guard, e_guard, cmd);
2621 goto out;
2622 }
2623
2624 /* check ref tag */
2625 if (e_ref_tag != a_ref_tag) {
2626 cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
2627 cmd->se_cmd.bad_sector = e_ref_tag;
2628
2629 ql_log(ql_log_warn, vha, 0xe077,
2630 "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
2631 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
2632 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
2633 a_guard, e_guard, cmd);
2634 goto out;
2635 }
2636
2637 /* check appl tag */
2638 if (e_app_tag != a_app_tag) {
2639 cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
2640 cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
2641
2642 ql_log(ql_log_warn, vha, 0xe078,
2643 "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
2644 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
2645 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
2646 a_guard, e_guard, cmd);
2647 goto out;
2648 }
2649out:
2650 return 1;
2651}
2652
2653
 2654/* If hardware_lock is held on entry, it might be dropped, then reacquired */
2655/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
2656static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
2657 struct qla_tgt_cmd *cmd,
2658 struct atio_from_isp *atio)
2659{
2660 struct ctio7_to_24xx *ctio24;
2661 struct qla_hw_data *ha = vha->hw;
2662 request_t *pkt;
2663 int ret = 0;
33a5fcee 2664 uint16_t temp;
2665
2666 ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
2667
b6a029e1 2668 pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
2669 if (pkt == NULL) {
2670 ql_dbg(ql_dbg_tgt, vha, 0xe050,
2671 "qla_target(%d): %s failed: unable to allocate "
2672 "request packet\n", vha->vp_idx, __func__);
2673 return -ENOMEM;
2674 }
2675
2676 if (cmd != NULL) {
2677 if (cmd->state < QLA_TGT_STATE_PROCESSED) {
2678 ql_dbg(ql_dbg_tgt, vha, 0xe051,
2679 "qla_target(%d): Terminating cmd %p with "
2680 "incorrect state %d\n", vha->vp_idx, cmd,
2681 cmd->state);
2682 } else
2683 ret = 1;
2684 }
2685
2686 pkt->entry_count = 1;
2687 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
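	/* QLA_TGT_SKIP_HANDLE makes qlt_ctio_to_cmd() ignore the
	 * eventual completion of this terminate-exchange CTIO
	 * (see qlt_ctio_to_cmd() below).
	 */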
2688
2689 ctio24 = (struct ctio7_to_24xx *)pkt;
2690 ctio24->entry_type = CTIO_TYPE7;
2691 ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED;
2692 ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
2693 ctio24->vp_index = vha->vp_idx;
2694 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
2695 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
2696 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
2697 ctio24->exchange_addr = atio->u.isp24.exchange_addr;
2698 ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
2699 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
2700 CTIO7_FLAGS_TERMINATE);
2701 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2702 ctio24->u.status1.ox_id = cpu_to_le16(temp);
2703
2704 /* Most likely, it isn't needed */
2705 ctio24->u.status1.residual = get_unaligned((uint32_t *)
2706 &atio->u.isp24.fcp_cmnd.add_cdb[
2707 atio->u.isp24.fcp_cmnd.add_cdb_len]);
2708 if (ctio24->u.status1.residual != 0)
2709 ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
2710
2711 /* Memory Barrier */
2712 wmb();
2713 qla2x00_start_iocbs(vha, vha->req);
2714 return ret;
2715}
2716
2717static void qlt_send_term_exchange(struct scsi_qla_host *vha,
2718 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
2719{
2720 unsigned long flags;
2721 int rc;
2722
2723 if (qlt_issue_marker(vha, ha_locked) < 0)
2724 return;
2725
2726 if (ha_locked) {
2727 rc = __qlt_send_term_exchange(vha, cmd, atio);
2728 if (rc == -ENOMEM)
2729 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
2730 goto done;
2731 }
2732 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
2733 rc = __qlt_send_term_exchange(vha, cmd, atio);
2734 if (rc == -ENOMEM)
2735 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
2d70c103 2736 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
2d70c103 2738done:
2739 if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) ||
2740 !cmd->cmd_sent_to_fw)) {
2741 if (!ha_locked && !in_interrupt())
2742 msleep(250); /* just in case */
2743
f9b6721a 2744 qlt_unmap_sg(vha, cmd);
2745 vha->hw->tgt.tgt_ops->free_cmd(cmd);
2746 }
7b898542 2747 return;
2748}
2749
2750static void qlt_init_term_exchange(struct scsi_qla_host *vha)
2751{
2752 struct list_head free_list;
2753 struct qla_tgt_cmd *cmd, *tcmd;
2754
2755 vha->hw->tgt.leak_exchg_thresh_hold =
2756 (vha->hw->fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;
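	/* i.e. once the number of dropped (leaked) exchanges exceeds
	 * this percentage of the firmware exchange count, a chip reset
	 * is requested -- see qlt_chk_exch_leak_thresh_hold() below.
	 */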
2757
2758 cmd = tcmd = NULL;
2759 if (!list_empty(&vha->hw->tgt.q_full_list)) {
2760 INIT_LIST_HEAD(&free_list);
2761 list_splice_init(&vha->hw->tgt.q_full_list, &free_list);
2762
2763 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
2764 list_del(&cmd->cmd_list);
2765 /* This cmd was never sent to TCM. There is no need
2766 * to schedule free or call free_cmd
2767 */
2768 qlt_free_cmd(cmd);
2769 vha->hw->tgt.num_qfull_cmds_alloc--;
2770 }
2771 }
2772 vha->hw->tgt.num_qfull_cmds_dropped = 0;
2773}
2774
2775static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
2776{
2777 uint32_t total_leaked;
2778
2779 total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;
2780
2781 if (vha->hw->tgt.leak_exchg_thresh_hold &&
2782 (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {
2783
2784 ql_dbg(ql_dbg_tgt, vha, 0xe079,
2785 "Chip reset due to exchange starvation: %d/%d.\n",
2786 total_leaked, vha->hw->fw_xcb_count);
2787
2788 if (IS_P3P_TYPE(vha->hw))
2789 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2790 else
2791 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2792 qla2xxx_wake_dpc(vha);
2793 }
2794
2795}
2796
2797void qlt_free_cmd(struct qla_tgt_cmd *cmd)
2798{
2799 struct qla_tgt_sess *sess = cmd->sess;
2800
2801 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
2802 "%s: se_cmd[%p] ox_id %04x\n",
2803 __func__, &cmd->se_cmd,
2804 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
2806 BUG_ON(cmd->cmd_in_wq);
2807
2808 if (!cmd->q_full)
2809 qlt_decr_num_pend_cmds(cmd->vha);
2810
f83adb61 2811 BUG_ON(cmd->sg_mapped);
e07f8f65 2812 cmd->jiffies_at_free = get_jiffies_64();
2813 if (unlikely(cmd->free_sg))
2814 kfree(cmd->sg);
2815
2816 if (!sess || !sess->se_sess) {
2817 WARN_ON(1);
2818 return;
2819 }
e07f8f65 2820 cmd->jiffies_at_free = get_jiffies_64();
51a07f84 2821 percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
2822}
2823EXPORT_SYMBOL(qlt_free_cmd);
2824
2825/* ha->hardware_lock supposed to be held on entry */
2826static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
2827 struct qla_tgt_cmd *cmd, void *ctio)
2828{
2829 struct qla_tgt_srr_ctio *sc;
0e8cd71c 2830 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
2831 struct qla_tgt_srr_imm *imm;
2832
2833 tgt->ctio_srr_id++;
e07f8f65 2834 cmd->cmd_flags |= BIT_15;
2835
2836 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
2837 "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx);
2838
2839 if (!ctio) {
2840 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055,
2841 "qla_target(%d): SRR CTIO, but ctio is NULL\n",
2842 vha->vp_idx);
2843 return -EINVAL;
2844 }
2845
2846 sc = kzalloc(sizeof(*sc), GFP_ATOMIC);
2847 if (sc != NULL) {
2848 sc->cmd = cmd;
2849 /* IRQ is already OFF */
2850 spin_lock(&tgt->srr_lock);
2851 sc->srr_id = tgt->ctio_srr_id;
2852 list_add_tail(&sc->srr_list_entry,
2853 &tgt->srr_ctio_list);
2854 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
2855 "CTIO SRR %p added (id %d)\n", sc, sc->srr_id);
2856 if (tgt->imm_srr_id == tgt->ctio_srr_id) {
2857 int found = 0;
2858 list_for_each_entry(imm, &tgt->srr_imm_list,
2859 srr_list_entry) {
2860 if (imm->srr_id == sc->srr_id) {
2861 found = 1;
2862 break;
2863 }
2864 }
2865 if (found) {
2866 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b,
2867 "Scheduling srr work\n");
2868 schedule_work(&tgt->srr_work);
2869 } else {
2870 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056,
2871 "qla_target(%d): imm_srr_id "
2872 "== ctio_srr_id (%d), but there is no "
2873 "corresponding SRR IMM, deleting CTIO "
2874 "SRR %p\n", vha->vp_idx,
2875 tgt->ctio_srr_id, sc);
2876 list_del(&sc->srr_list_entry);
2877 spin_unlock(&tgt->srr_lock);
2878
2879 kfree(sc);
2880 return -EINVAL;
2881 }
2882 }
2883 spin_unlock(&tgt->srr_lock);
2884 } else {
2885 struct qla_tgt_srr_imm *ti;
2886
2887 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057,
2888 "qla_target(%d): Unable to allocate SRR CTIO entry\n",
2889 vha->vp_idx);
2890 spin_lock(&tgt->srr_lock);
2891 list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list,
2892 srr_list_entry) {
2893 if (imm->srr_id == tgt->ctio_srr_id) {
2894 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c,
2895 "IMM SRR %p deleted (id %d)\n",
2896 imm, imm->srr_id);
2897 list_del(&imm->srr_list_entry);
2898 qlt_reject_free_srr_imm(vha, imm, 1);
2899 }
2900 }
2901 spin_unlock(&tgt->srr_lock);
2902
2903 return -ENOMEM;
2904 }
2905
2906 return 0;
2907}
2908
2909/*
 2910 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
2911 */
2912static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
2913 struct qla_tgt_cmd *cmd, uint32_t status)
2914{
2915 int term = 0;
2916
2917 if (ctio != NULL) {
2918 struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
2919 term = !(c->flags &
2920 __constant_cpu_to_le16(OF_TERM_EXCH));
2921 } else
2922 term = 1;
2923
2924 if (term)
2925 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
2926
2927 return term;
2928}
2929
2930/* ha->hardware_lock supposed to be held on entry */
2931static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha,
2932 uint32_t handle)
2933{
2934 struct qla_hw_data *ha = vha->hw;
2935
2936 handle--;
2937 if (ha->tgt.cmds[handle] != NULL) {
2938 struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle];
2939 ha->tgt.cmds[handle] = NULL;
2940 return cmd;
2941 } else
2942 return NULL;
2943}
2944
2945/* ha->hardware_lock supposed to be held on entry */
2946static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
2947 uint32_t handle, void *ctio)
2948{
2949 struct qla_tgt_cmd *cmd = NULL;
2950
2951 /* Clear out internal marks */
2952 handle &= ~(CTIO_COMPLETION_HANDLE_MARK |
2953 CTIO_INTERMEDIATE_HANDLE_MARK);
2954
2955 if (handle != QLA_TGT_NULL_HANDLE) {
667024a3 2956 if (unlikely(handle == QLA_TGT_SKIP_HANDLE))
2d70c103 2957 return NULL;
2d70c103 2959 /* handle-1 is actually used */
8d93f550 2960 if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) {
2961 ql_dbg(ql_dbg_tgt, vha, 0xe052,
2962 "qla_target(%d): Wrong handle %x received\n",
2963 vha->vp_idx, handle);
2964 return NULL;
2965 }
2966 cmd = qlt_get_cmd(vha, handle);
2967 if (unlikely(cmd == NULL)) {
2968 ql_dbg(ql_dbg_tgt, vha, 0xe053,
2969 "qla_target(%d): Suspicious: unable to "
2970 "find the command with handle %x\n", vha->vp_idx,
2971 handle);
2972 return NULL;
2973 }
2974 } else if (ctio != NULL) {
2975 /* We can't get loop ID from CTIO7 */
2976 ql_dbg(ql_dbg_tgt, vha, 0xe054,
2977 "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
2978 "support NULL handles\n", vha->vp_idx);
2979 return NULL;
2980 }
2981
2982 return cmd;
2983}
2984
2985/* hardware_lock should be held by caller. */
2986static void
2987qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
2988{
2989 struct qla_hw_data *ha = vha->hw;
2990 uint32_t handle;
2991
2992 if (cmd->sg_mapped)
2993 qlt_unmap_sg(vha, cmd);
2994
2995 handle = qlt_make_handle(vha);
2996
2997 /* TODO: fix debug message type and ids. */
2998 if (cmd->state == QLA_TGT_STATE_PROCESSED) {
2999 ql_dbg(ql_dbg_io, vha, 0xff00,
3000 "HOST-ABORT: handle=%d, state=PROCESSED.\n", handle);
3001 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
3002 cmd->write_data_transferred = 0;
3003 cmd->state = QLA_TGT_STATE_DATA_IN;
3004
3005 ql_dbg(ql_dbg_io, vha, 0xff01,
3006 "HOST-ABORT: handle=%d, state=DATA_IN.\n", handle);
3007
3008 ha->tgt.tgt_ops->handle_data(cmd);
3009 return;
3010 } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
3011 ql_dbg(ql_dbg_io, vha, 0xff02,
3012 "HOST-ABORT: handle=%d, state=ABORTED.\n", handle);
3013 } else {
3014 ql_dbg(ql_dbg_io, vha, 0xff03,
3015 "HOST-ABORT: handle=%d, state=BAD(%d).\n", handle,
3016 cmd->state);
3017 dump_stack();
3018 }
3019
e07f8f65 3020 cmd->cmd_flags |= BIT_12;
3021 ha->tgt.tgt_ops->free_cmd(cmd);
3022}
3023
3024void
3025qlt_host_reset_handler(struct qla_hw_data *ha)
3026{
3027 struct qla_tgt_cmd *cmd;
3028 unsigned long flags;
3029 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
3030 scsi_qla_host_t *vha = NULL;
3031 struct qla_tgt *tgt = base_vha->vha_tgt.qla_tgt;
3032 uint32_t i;
3033
3034 if (!base_vha->hw->tgt.tgt_ops)
3035 return;
3036
3037 if (!tgt || qla_ini_mode_enabled(base_vha)) {
3038 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
3039 "Target mode disabled\n");
3040 return;
3041 }
3042
3043 ql_dbg(ql_dbg_tgt_mgt, vha, 0xff10,
3044 "HOST-ABORT-HNDLR: base_vha->dpc_flags=%lx.\n",
3045 base_vha->dpc_flags);
3046
3047 spin_lock_irqsave(&ha->hardware_lock, flags);
3048 for (i = 1; i < DEFAULT_OUTSTANDING_COMMANDS + 1; i++) {
3049 cmd = qlt_get_cmd(base_vha, i);
3050 if (!cmd)
3051 continue;
3052 /* ha->tgt.cmds entry is cleared by qlt_get_cmd. */
3053 vha = cmd->vha;
3054 qlt_abort_cmd_on_host_reset(vha, cmd);
3055 }
3056 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3057}
3058
3059
3060/*
 3061 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
3062 */
3063static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
3064 uint32_t status, void *ctio)
3065{
3066 struct qla_hw_data *ha = vha->hw;
3067 struct se_cmd *se_cmd;
9ac8928e 3068 const struct target_core_fabric_ops *tfo;
3069 struct qla_tgt_cmd *cmd;
3070
3071 if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
3072 /* That could happen only in case of an error/reset/abort */
3073 if (status != CTIO_SUCCESS) {
3074 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
3075 "Intermediate CTIO received"
3076 " (status %x)\n", status);
3077 }
3078 return;
3079 }
3080
3081 cmd = qlt_ctio_to_cmd(vha, handle, ctio);
092e1dc3 3082 if (cmd == NULL)
2d70c103 3083 return;
3085 se_cmd = &cmd->se_cmd;
3086 tfo = se_cmd->se_tfo;
d564a372 3087 cmd->cmd_sent_to_fw = 0;
f9b6721a 3089 qlt_unmap_sg(vha, cmd);
3090
3091 if (unlikely(status != CTIO_SUCCESS)) {
3092 switch (status & 0xFFFF) {
3093 case CTIO_LIP_RESET:
3094 case CTIO_TARGET_RESET:
3095 case CTIO_ABORTED:
7b898542 3096			/* driver requested abort via Terminate exchange */
3097 case CTIO_TIMEOUT:
3098 case CTIO_INVALID_RX_ID:
3099 /* They are OK */
3100 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
3101 "qla_target(%d): CTIO with "
3102 "status %#x received, state %x, se_cmd %p, "
3103 "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
3104 "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
3105 status, cmd->state, se_cmd);
3106 break;
3107
3108 case CTIO_PORT_LOGGED_OUT:
3109 case CTIO_PORT_UNAVAILABLE:
3110 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
3111 "qla_target(%d): CTIO with PORT LOGGED "
3112 "OUT (29) or PORT UNAVAILABLE (28) status %x "
3113 "received (state %x, se_cmd %p)\n", vha->vp_idx,
3114 status, cmd->state, se_cmd);
3115 break;
3116
3117 case CTIO_SRR_RECEIVED:
3118 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a,
3119 "qla_target(%d): CTIO with SRR_RECEIVED"
3120 " status %x received (state %x, se_cmd %p)\n",
3121 vha->vp_idx, status, cmd->state, se_cmd);
3122 if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0)
3123 break;
3124 else
3125 return;
3126
3127 case CTIO_DIF_ERROR: {
3128 struct ctio_crc_from_fw *crc =
3129 (struct ctio_crc_from_fw *)ctio;
3130 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
3131 "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n",
3132 vha->vp_idx, status, cmd->state, se_cmd,
3133 *((u64 *)&crc->actual_dif[0]),
3134 *((u64 *)&crc->expected_dif[0]));
3135
3136 if (qlt_handle_dif_error(vha, cmd, ctio)) {
3137 if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
3138 /* scsi Write/xfer rdy complete */
3139 goto skip_term;
3140 } else {
3141 /* scsi read/xmit respond complete
3142 * call handle dif to send scsi status
3143 * rather than terminate exchange.
3144 */
3145 cmd->state = QLA_TGT_STATE_PROCESSED;
3146 ha->tgt.tgt_ops->handle_dif_err(cmd);
3147 return;
3148 }
3149 } else {
3150 /* Need to generate a SCSI good completion.
3151 * because FW did not send scsi status.
3152 */
3153 status = 0;
3154 goto skip_term;
3155 }
3156 break;
3157 }
3158 default:
3159 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
f83adb61 3160 "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
3161 vha->vp_idx, status, cmd->state, se_cmd);
3162 break;
3163 }
3164
3165
3166 /* "cmd->state == QLA_TGT_STATE_ABORTED" means
3167 * cmd is already aborted/terminated, we don't
3168 * need to terminate again. The exchange is already
3169 * cleaned up/freed at FW level. Just cleanup at driver
3170 * level.
3171 */
3172 if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
33e79977 3173 (cmd->state != QLA_TGT_STATE_ABORTED)) {
e07f8f65 3174 cmd->cmd_flags |= BIT_13;
3175 if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
3176 return;
7b898542 3177 }
2d70c103 3178 }
f83adb61 3179skip_term:
3180
3181 if (cmd->state == QLA_TGT_STATE_PROCESSED) {
667024a3 3182 ;
3183 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
3184 int rx_status = 0;
3185
3186 cmd->state = QLA_TGT_STATE_DATA_IN;
3187
3188 if (unlikely(status != CTIO_SUCCESS))
3189 rx_status = -EIO;
3190 else
3191 cmd->write_data_transferred = 1;
3192
3193 ha->tgt.tgt_ops->handle_data(cmd);
3194 return;
3195 } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
3196 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
3197 "Aborted command %p (tag %d) finished\n", cmd, cmd->tag);
3198 } else {
3199 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
3200 "qla_target(%d): A command in state (%d) should "
3201 "not return a CTIO complete\n", vha->vp_idx, cmd->state);
3202 }
3203
3204 if (unlikely(status != CTIO_SUCCESS) &&
3205 (cmd->state != QLA_TGT_STATE_ABORTED)) {
3206 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
3207 dump_stack();
3208 }
3209
3211 ha->tgt.tgt_ops->free_cmd(cmd);
3212}
3213
3214static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
3215 uint8_t task_codes)
3216{
3217 int fcp_task_attr;
3218
3219 switch (task_codes) {
3220 case ATIO_SIMPLE_QUEUE:
68d81f40 3221 fcp_task_attr = TCM_SIMPLE_TAG;
3222 break;
3223 case ATIO_HEAD_OF_QUEUE:
68d81f40 3224 fcp_task_attr = TCM_HEAD_TAG;
3225 break;
3226 case ATIO_ORDERED_QUEUE:
68d81f40 3227 fcp_task_attr = TCM_ORDERED_TAG;
3228 break;
3229 case ATIO_ACA_QUEUE:
68d81f40 3230 fcp_task_attr = TCM_ACA_TAG;
3231 break;
3232 case ATIO_UNTAGGED:
68d81f40 3233 fcp_task_attr = TCM_SIMPLE_TAG;
3234 break;
3235 default:
3236 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
3237 "qla_target: unknown task code %x, use ORDERED instead\n",
3238 task_codes);
68d81f40 3239 fcp_task_attr = TCM_ORDERED_TAG;
3240 break;
3241 }
3242
3243 return fcp_task_attr;
3244}
3245
3246static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,
3247 uint8_t *);
3248/*
3249 * Process context for I/O path into tcm_qla2xxx code
3250 */
51a07f84 3251static void __qlt_do_work(struct qla_tgt_cmd *cmd)
2d70c103 3252{
3253 scsi_qla_host_t *vha = cmd->vha;
3254 struct qla_hw_data *ha = vha->hw;
0e8cd71c 3255 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
51a07f84 3256 struct qla_tgt_sess *sess = cmd->sess;
3257 struct atio_from_isp *atio = &cmd->atio;
3258 unsigned char *cdb;
3259 unsigned long flags;
3260 uint32_t data_length;
3261 int ret, fcp_task_attr, data_dir, bidi = 0;
3262
3263 cmd->cmd_in_wq = 0;
3264 cmd->cmd_flags |= BIT_1;
3265 if (tgt->tgt_stop)
3266 goto out_term;
3267
3268 cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
3269 cmd->tag = atio->u.isp24.exchange_addr;
3270 cmd->unpacked_lun = scsilun_to_int(
3271 (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
3272
3273 if (atio->u.isp24.fcp_cmnd.rddata &&
3274 atio->u.isp24.fcp_cmnd.wrdata) {
3275 bidi = 1;
3276 data_dir = DMA_TO_DEVICE;
3277 } else if (atio->u.isp24.fcp_cmnd.rddata)
3278 data_dir = DMA_FROM_DEVICE;
3279 else if (atio->u.isp24.fcp_cmnd.wrdata)
3280 data_dir = DMA_TO_DEVICE;
3281 else
3282 data_dir = DMA_NONE;
3283
3284 fcp_task_attr = qlt_get_fcp_task_attr(vha,
3285 atio->u.isp24.fcp_cmnd.task_attr);
3286 data_length = be32_to_cpu(get_unaligned((uint32_t *)
3287 &atio->u.isp24.fcp_cmnd.add_cdb[
3288 atio->u.isp24.fcp_cmnd.add_cdb_len]));
3289
3290 ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
3291 fcp_task_attr, data_dir, bidi);
3292 if (ret != 0)
3293 goto out_term;
3294 /*
3295 * Drop extra session reference from qla_tgt_handle_cmd_for_atio*(
3296 */
08234e3a 3297 spin_lock_irqsave(&ha->hardware_lock, flags);
2d70c103 3298 ha->tgt.tgt_ops->put_sess(sess);
08234e3a 3299 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3300 return;
3301
3302out_term:
667024a3 3303 ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
2d70c103 3304 /*
 3305	 * cmd has not been sent to the target yet, so pass NULL as the second
3306 * argument to qlt_send_term_exchange() and free the memory here.
2d70c103 3307 */
e07f8f65 3308 cmd->cmd_flags |= BIT_2;
3309 spin_lock_irqsave(&ha->hardware_lock, flags);
3310 qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
3311
3312 qlt_decr_num_pend_cmds(vha);
3313 percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
3314 ha->tgt.tgt_ops->put_sess(sess);
3315 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3316}
3317
3318static void qlt_do_work(struct work_struct *work)
3319{
3320 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
3321
3322 __qlt_do_work(cmd);
3323}
3324
3325static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
3326 struct qla_tgt_sess *sess,
3327 struct atio_from_isp *atio)
3328{
3329 struct se_session *se_sess = sess->se_sess;
3330 struct qla_tgt_cmd *cmd;
3331 int tag;
3332
3333 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
3334 if (tag < 0)
3335 return NULL;
3336
3337 cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
3338 memset(cmd, 0, sizeof(struct qla_tgt_cmd));
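	/* Commands are carved out of the pre-allocated per-session
	 * sess_cmd_map and indexed by the percpu_ida tag, so no
	 * allocation is needed here in the I/O path.
	 */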
3339
3340 memcpy(&cmd->atio, atio, sizeof(*atio));
3341 cmd->state = QLA_TGT_STATE_NEW;
3342 cmd->tgt = vha->vha_tgt.qla_tgt;
33e79977 3343 qlt_incr_num_pend_cmds(vha);
3344 cmd->vha = vha;
3345 cmd->se_cmd.map_tag = tag;
3346 cmd->sess = sess;
3347 cmd->loop_id = sess->loop_id;
3348 cmd->conf_compl_supported = sess->conf_compl_supported;
3349
3350 return cmd;
3351}
3352
3353static void qlt_send_busy(struct scsi_qla_host *, struct atio_from_isp *,
3354 uint16_t);
3355
3356static void qlt_create_sess_from_atio(struct work_struct *work)
3357{
3358 struct qla_tgt_sess_op *op = container_of(work,
3359 struct qla_tgt_sess_op, work);
3360 scsi_qla_host_t *vha = op->vha;
3361 struct qla_hw_data *ha = vha->hw;
3362 struct qla_tgt_sess *sess;
3363 struct qla_tgt_cmd *cmd;
3364 unsigned long flags;
3365 uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
3366
3367 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
3368 "qla_target(%d): Unable to find wwn login"
3369 " (s_id %x:%x:%x), trying to create it manually\n",
3370 vha->vp_idx, s_id[0], s_id[1], s_id[2]);
3371
3372 if (op->atio.u.raw.entry_count > 1) {
3373 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
3374 "Dropping multy entry atio %p\n", &op->atio);
3375 goto out_term;
3376 }
3377
3378 mutex_lock(&vha->vha_tgt.tgt_mutex);
3379 sess = qlt_make_local_sess(vha, s_id);
3380 /* sess has an extra creation ref. */
3381 mutex_unlock(&vha->vha_tgt.tgt_mutex);
3382
3383 if (!sess)
3384 goto out_term;
3385 /*
3386 * Now obtain a pre-allocated session tag using the original op->atio
3387 * packet header, and dispatch into __qlt_do_work() using the existing
3388 * process context.
3389 */
3390 cmd = qlt_get_tag(vha, sess, &op->atio);
3391 if (!cmd) {
3392 spin_lock_irqsave(&ha->hardware_lock, flags);
3393 qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY);
2d70c103 3394 ha->tgt.tgt_ops->put_sess(sess);
3395 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3396 kfree(op);
3397 return;
3398 }
3399 /*
3400 * __qlt_do_work() will call ha->tgt.tgt_ops->put_sess() to release
3401 * the extra reference taken above by qlt_make_local_sess()
3402 */
3403 __qlt_do_work(cmd);
3404 kfree(op);
3405 return;
3406
3407out_term:
3408 spin_lock_irqsave(&ha->hardware_lock, flags);
3409 qlt_send_term_exchange(vha, NULL, &op->atio, 1);
08234e3a 3410 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3411 kfree(op);
3412
3413}
3414
3415/* ha->hardware_lock supposed to be held on entry */
3416static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
3417 struct atio_from_isp *atio)
3418{
51a07f84 3419 struct qla_hw_data *ha = vha->hw;
0e8cd71c 3420 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
51a07f84 3421 struct qla_tgt_sess *sess;
3422 struct qla_tgt_cmd *cmd;
3423
3424 if (unlikely(tgt->tgt_stop)) {
667024a3 3425 ql_dbg(ql_dbg_io, vha, 0x3061,
3426 "New command while device %p is shutting down\n", tgt);
3427 return -EFAULT;
3428 }
3429
3430 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
3431 if (unlikely(!sess)) {
3432 struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op),
3433 GFP_ATOMIC);
3434 if (!op)
3435 return -ENOMEM;
3436
3437 memcpy(&op->atio, atio, sizeof(*atio));
78c2106a 3438 op->vha = vha;
3439 INIT_WORK(&op->work, qlt_create_sess_from_atio);
3440 queue_work(qla_tgt_wq, &op->work);
3441 return 0;
3442 }
3443 /*
3444 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
3445 */
3446 kref_get(&sess->se_sess->sess_kref);
3447
3448 cmd = qlt_get_tag(vha, sess, atio);
2d70c103 3449 if (!cmd) {
667024a3 3450 ql_dbg(ql_dbg_io, vha, 0x3062,
2d70c103 3451 "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
51a07f84 3452 ha->tgt.tgt_ops->put_sess(sess);
3453 return -ENOMEM;
3454 }
3455
3456 cmd->cmd_flags = 0;
3457 cmd->jiffies_at_alloc = get_jiffies_64();
3458
3459 cmd->reset_count = vha->hw->chip_reset;
3460
3461 cmd->cmd_in_wq = 1;
3462 cmd->cmd_flags |= BIT_0;
3463 INIT_WORK(&cmd->work, qlt_do_work);
3464 queue_work(qla_tgt_wq, &cmd->work);
3465 return 0;
3466
3467}
3468
3469/* ha->hardware_lock supposed to be held on entry */
3470static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
3471 int fn, void *iocb, int flags)
3472{
3473 struct scsi_qla_host *vha = sess->vha;
3474 struct qla_hw_data *ha = vha->hw;
3475 struct qla_tgt_mgmt_cmd *mcmd;
3476 int res;
3477 uint8_t tmr_func;
3478
3479 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
3480 if (!mcmd) {
3481 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
3482 "qla_target(%d): Allocation of management "
3483 "command failed, some commands and their data could "
3484 "leak\n", vha->vp_idx);
3485 return -ENOMEM;
3486 }
3487 memset(mcmd, 0, sizeof(*mcmd));
3488 mcmd->sess = sess;
3489
3490 if (iocb) {
3491 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
3492 sizeof(mcmd->orig_iocb.imm_ntfy));
3493 }
3494 mcmd->tmr_func = fn;
3495 mcmd->flags = flags;
b6a029e1 3496 mcmd->reset_count = vha->hw->chip_reset;
3497
3498 switch (fn) {
3499 case QLA_TGT_CLEAR_ACA:
3500 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000,
3501 "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx);
3502 tmr_func = TMR_CLEAR_ACA;
3503 break;
3504
3505 case QLA_TGT_TARGET_RESET:
3506 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001,
3507 "qla_target(%d): TARGET_RESET received\n",
3508 sess->vha->vp_idx);
3509 tmr_func = TMR_TARGET_WARM_RESET;
3510 break;
3511
3512 case QLA_TGT_LUN_RESET:
3513 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
3514 "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
3515 tmr_func = TMR_LUN_RESET;
3516 break;
3517
3518 case QLA_TGT_CLEAR_TS:
3519 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003,
3520 "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx);
3521 tmr_func = TMR_CLEAR_TASK_SET;
3522 break;
3523
3524 case QLA_TGT_ABORT_TS:
3525 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004,
3526 "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx);
3527 tmr_func = TMR_ABORT_TASK_SET;
3528 break;
3529#if 0
3530 case QLA_TGT_ABORT_ALL:
3531 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005,
3532 "qla_target(%d): Doing ABORT_ALL_TASKS\n",
3533 sess->vha->vp_idx);
3534 tmr_func = 0;
3535 break;
3536
3537 case QLA_TGT_ABORT_ALL_SESS:
3538 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006,
3539 "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n",
3540 sess->vha->vp_idx);
3541 tmr_func = 0;
3542 break;
3543
3544 case QLA_TGT_NEXUS_LOSS_SESS:
3545 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007,
3546 "qla_target(%d): Doing NEXUS_LOSS_SESS\n",
3547 sess->vha->vp_idx);
3548 tmr_func = 0;
3549 break;
3550
3551 case QLA_TGT_NEXUS_LOSS:
3552 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008,
3553 "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx);
3554 tmr_func = 0;
3555 break;
3556#endif
3557 default:
3558 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a,
3559 "qla_target(%d): Unknown task mgmt fn 0x%x\n",
3560 sess->vha->vp_idx, fn);
3561 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
3562 return -ENOSYS;
3563 }
3564
3565 res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0);
3566 if (res != 0) {
3567 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
3568 "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
3569 sess->vha->vp_idx, res);
3570 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
3571 return -EFAULT;
3572 }
3573
3574 return 0;
3575}
3576
3577/* ha->hardware_lock supposed to be held on entry */
3578static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
3579{
3580 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
3581 struct qla_hw_data *ha = vha->hw;
3582 struct qla_tgt *tgt;
3583 struct qla_tgt_sess *sess;
3584 uint32_t lun, unpacked_lun;
3585 int lun_size, fn;
3586
0e8cd71c 3587 tgt = vha->vha_tgt.qla_tgt;
2d70c103
NB
3588
3589 lun = a->u.isp24.fcp_cmnd.lun;
3590 lun_size = sizeof(a->u.isp24.fcp_cmnd.lun);
3591 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
3592 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
3593 a->u.isp24.fcp_hdr.s_id);
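	/*
	 * fcp_cmnd.lun holds the LUN in SCSI wire format; scsilun_to_int()
	 * converts it to the flat LUN value used by the target core.
	 */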
3594 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
3595
3596 if (!sess) {
3597 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
3598 "qla_target(%d): task mgmt fn 0x%x for "
 3599 "non-existent session\n", vha->vp_idx, fn);
3600 return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
3601 sizeof(struct atio_from_isp));
3602 }
3603
3604 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
3605}
3606
3607/* ha->hardware_lock supposed to be held on entry */
3608static int __qlt_abort_task(struct scsi_qla_host *vha,
3609 struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess)
3610{
3611 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
3612 struct qla_hw_data *ha = vha->hw;
3613 struct qla_tgt_mgmt_cmd *mcmd;
3614 uint32_t lun, unpacked_lun;
3615 int rc;
3616
3617 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
3618 if (mcmd == NULL) {
3619 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
3620 "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
3621 vha->vp_idx, __func__);
3622 return -ENOMEM;
3623 }
3624 memset(mcmd, 0, sizeof(*mcmd));
3625
3626 mcmd->sess = sess;
3627 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
3628 sizeof(mcmd->orig_iocb.imm_ntfy));
3629
3630 lun = a->u.isp24.fcp_cmnd.lun;
3631 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
80187f8e 3632 mcmd->reset_count = vha->hw->chip_reset;
2d70c103
NB
3633
3634 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
3635 le16_to_cpu(iocb->u.isp2x.seq_id));
3636 if (rc != 0) {
3637 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
3638 "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
3639 vha->vp_idx, rc);
3640 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
3641 return -EFAULT;
3642 }
3643
3644 return 0;
3645}
3646
3647/* ha->hardware_lock supposed to be held on entry */
3648static int qlt_abort_task(struct scsi_qla_host *vha,
3649 struct imm_ntfy_from_isp *iocb)
3650{
3651 struct qla_hw_data *ha = vha->hw;
3652 struct qla_tgt_sess *sess;
3653 int loop_id;
3654
3655 loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);
3656
3657 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
3658 if (sess == NULL) {
3659 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
 3660 "qla_target(%d): task abort for non-existent "
3661 "session\n", vha->vp_idx);
0e8cd71c 3662 return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
2d70c103
NB
3663 QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
3664 }
3665
3666 return __qlt_abort_task(vha, iocb, sess);
3667}
3668
3669/*
 3670 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
3671 */
3672static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
3673 struct imm_ntfy_from_isp *iocb)
3674{
2d70c103
NB
3675 int res = 0;
3676
3677 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
7b833558
OK
3678 "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n",
3679 vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode);
2d70c103
NB
3680
3681 switch (iocb->u.isp24.status_subcode) {
3682 case ELS_PLOGI:
3683 case ELS_FLOGI:
3684 case ELS_PRLI:
3685 case ELS_LOGO:
3686 case ELS_PRLO:
3687 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
3688 break;
3689 case ELS_PDISC:
3690 case ELS_ADISC:
3691 {
0e8cd71c 3692 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
2d70c103
NB
3693 if (tgt->link_reinit_iocb_pending) {
3694 qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
3695 0, 0, 0, 0, 0, 0);
3696 tgt->link_reinit_iocb_pending = 0;
3697 }
3698 res = 1; /* send notify ack */
3699 break;
3700 }
3701
3702 default:
3703 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
3704 "qla_target(%d): Unsupported ELS command %x "
3705 "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
3706 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
3707 break;
3708 }
3709
3710 return res;
3711}
3712
3713static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
3714{
3715 struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL;
3716 size_t first_offset = 0, rem_offset = offset, tmp = 0;
3717 int i, sg_srr_cnt, bufflen = 0;
3718
3719 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023,
3720 "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, "
3721 "cmd->sg_cnt: %u, direction: %d\n",
3722 cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
3723
3724 /*
3725 * FIXME: Reject non zero SRR relative offset until we can test
3726 * this code properly.
3727 */
3728 pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset);
3729 return -1;
3730
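	/*
	 * Everything below is currently unreachable because of the early
	 * return above; it is kept for when non-zero SRR relative offsets
	 * can be tested properly.
	 */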
3731 if (!cmd->sg || !cmd->sg_cnt) {
3732 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055,
3733 "Missing cmd->sg or zero cmd->sg_cnt in"
3734 " qla_tgt_set_data_offset\n");
3735 return -EINVAL;
3736 }
3737 /*
3738 * Walk the current cmd->sg list until we locate the new sg_srr_start
3739 */
3740 for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
3741 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024,
3742 "sg[%d]: %p page: %p, length: %d, offset: %d\n",
3743 i, sg, sg_page(sg), sg->length, sg->offset);
3744
3745 if ((sg->length + tmp) > offset) {
3746 first_offset = rem_offset;
3747 sg_srr_start = sg;
3748 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025,
3749 "Found matching sg[%d], using %p as sg_srr_start, "
3750 "and using first_offset: %zu\n", i, sg,
3751 first_offset);
3752 break;
3753 }
3754 tmp += sg->length;
3755 rem_offset -= sg->length;
3756 }
3757
3758 if (!sg_srr_start) {
3759 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056,
3760 "Unable to locate sg_srr_start for offset: %u\n", offset);
3761 return -EINVAL;
3762 }
3763 sg_srr_cnt = (cmd->sg_cnt - i);
3764
3765 sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL);
3766 if (!sg_srr) {
3767 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057,
3768 "Unable to allocate sgp\n");
3769 return -ENOMEM;
3770 }
3771 sg_init_table(sg_srr, sg_srr_cnt);
3772 sgp = &sg_srr[0];
3773 /*
3774 * Walk the remaining list for sg_srr_start, mapping to the newly
3775 * allocated sg_srr taking first_offset into account.
3776 */
3777 for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) {
3778 if (first_offset) {
3779 sg_set_page(sgp, sg_page(sg),
3780 (sg->length - first_offset), first_offset);
3781 first_offset = 0;
3782 } else {
3783 sg_set_page(sgp, sg_page(sg), sg->length, 0);
3784 }
3785 bufflen += sgp->length;
3786
3787 sgp = sg_next(sgp);
3788 if (!sgp)
3789 break;
3790 }
3791
3792 cmd->sg = sg_srr;
3793 cmd->sg_cnt = sg_srr_cnt;
3794 cmd->bufflen = bufflen;
3795 cmd->offset += offset;
3796 cmd->free_sg = 1;
3797
3798 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg);
3799 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n",
3800 cmd->sg_cnt);
3801 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n",
3802 cmd->bufflen);
3803 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n",
3804 cmd->offset);
3805
3806 if (cmd->sg_cnt < 0)
3807 BUG();
3808
3809 if (cmd->bufflen < 0)
3810 BUG();
3811
3812 return 0;
3813}
3814
3815static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd,
3816 uint32_t srr_rel_offs, int *xmit_type)
3817{
3818 int res = 0, rel_offs;
3819
3820 rel_offs = srr_rel_offs - cmd->offset;
3821 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d",
3822 srr_rel_offs, rel_offs);
3823
3824 *xmit_type = QLA_TGT_XMIT_ALL;
3825
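	/*
	 * rel_offs == cmd->bufflen means the initiator already has all the
	 * data, so only status is retransmitted; a smaller positive offset
	 * trims the SG list via qlt_set_data_offset().
	 */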
3826 if (rel_offs < 0) {
3827 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062,
3828 "qla_target(%d): SRR rel_offs (%d) < 0",
3829 cmd->vha->vp_idx, rel_offs);
3830 res = -1;
3831 } else if (rel_offs == cmd->bufflen)
3832 *xmit_type = QLA_TGT_XMIT_STATUS;
3833 else if (rel_offs > 0)
3834 res = qlt_set_data_offset(cmd, rel_offs);
3835
3836 return res;
3837}
3838
3839/* No locks, thread context */
3840static void qlt_handle_srr(struct scsi_qla_host *vha,
3841 struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm)
3842{
3843 struct imm_ntfy_from_isp *ntfy =
3844 (struct imm_ntfy_from_isp *)&imm->imm_ntfy;
3845 struct qla_hw_data *ha = vha->hw;
3846 struct qla_tgt_cmd *cmd = sctio->cmd;
3847 struct se_cmd *se_cmd = &cmd->se_cmd;
3848 unsigned long flags;
3849 int xmit_type = 0, resp = 0;
3850 uint32_t offset;
3851 uint16_t srr_ui;
3852
3853 offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs);
3854 srr_ui = ntfy->u.isp24.srr_ui;
3855
3856 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n",
3857 cmd, srr_ui);
3858
3859 switch (srr_ui) {
3860 case SRR_IU_STATUS:
3861 spin_lock_irqsave(&ha->hardware_lock, flags);
3862 qlt_send_notify_ack(vha, ntfy,
3863 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
3864 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3865 xmit_type = QLA_TGT_XMIT_STATUS;
3866 resp = 1;
3867 break;
3868 case SRR_IU_DATA_IN:
3869 if (!cmd->sg || !cmd->sg_cnt) {
3870 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063,
3871 "Unable to process SRR_IU_DATA_IN due to"
3872 " missing cmd->sg, state: %d\n", cmd->state);
3873 dump_stack();
3874 goto out_reject;
3875 }
3876 if (se_cmd->scsi_status != 0) {
3877 ql_dbg(ql_dbg_tgt, vha, 0xe02a,
3878 "Rejecting SRR_IU_DATA_IN with non GOOD "
3879 "scsi_status\n");
3880 goto out_reject;
3881 }
3882 cmd->bufflen = se_cmd->data_length;
3883
3884 if (qlt_has_data(cmd)) {
3885 if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
3886 goto out_reject;
3887 spin_lock_irqsave(&ha->hardware_lock, flags);
3888 qlt_send_notify_ack(vha, ntfy,
3889 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
3890 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3891 resp = 1;
3892 } else {
3893 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064,
3894 "qla_target(%d): SRR for in data for cmd "
3895 "without them (tag %d, SCSI status %d), "
3896 "reject", vha->vp_idx, cmd->tag,
3897 cmd->se_cmd.scsi_status);
3898 goto out_reject;
3899 }
3900 break;
3901 case SRR_IU_DATA_OUT:
3902 if (!cmd->sg || !cmd->sg_cnt) {
3903 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065,
3904 "Unable to process SRR_IU_DATA_OUT due to"
3905 " missing cmd->sg\n");
3906 dump_stack();
3907 goto out_reject;
3908 }
3909 if (se_cmd->scsi_status != 0) {
3910 ql_dbg(ql_dbg_tgt, vha, 0xe02b,
3911 "Rejecting SRR_IU_DATA_OUT"
3912 " with non GOOD scsi_status\n");
3913 goto out_reject;
3914 }
3915 cmd->bufflen = se_cmd->data_length;
3916
3917 if (qlt_has_data(cmd)) {
3918 if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
3919 goto out_reject;
3920 spin_lock_irqsave(&ha->hardware_lock, flags);
3921 qlt_send_notify_ack(vha, ntfy,
3922 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
3923 spin_unlock_irqrestore(&ha->hardware_lock, flags);
e07f8f65
SK
3924 if (xmit_type & QLA_TGT_XMIT_DATA) {
3925 cmd->cmd_flags |= BIT_8;
2d70c103 3926 qlt_rdy_to_xfer(cmd);
e07f8f65 3927 }
2d70c103
NB
3928 } else {
3929 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066,
3930 "qla_target(%d): SRR for out data for cmd "
3931 "without them (tag %d, SCSI status %d), "
3932 "reject", vha->vp_idx, cmd->tag,
3933 cmd->se_cmd.scsi_status);
3934 goto out_reject;
3935 }
3936 break;
3937 default:
3938 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067,
3939 "qla_target(%d): Unknown srr_ui value %x",
3940 vha->vp_idx, srr_ui);
3941 goto out_reject;
3942 }
3943
3944 /* Transmit response in case of status and data-in cases */
e07f8f65
SK
3945 if (resp) {
3946 cmd->cmd_flags |= BIT_7;
2d70c103 3947 qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
e07f8f65 3948 }
2d70c103
NB
3949
3950 return;
3951
3952out_reject:
3953 spin_lock_irqsave(&ha->hardware_lock, flags);
3954 qlt_send_notify_ack(vha, ntfy, 0, 0, 0,
3955 NOTIFY_ACK_SRR_FLAGS_REJECT,
3956 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
3957 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
3958 if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
3959 cmd->state = QLA_TGT_STATE_DATA_IN;
3960 dump_stack();
e07f8f65
SK
3961 } else {
3962 cmd->cmd_flags |= BIT_9;
2d70c103 3963 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
e07f8f65 3964 }
2d70c103
NB
3965 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3966}
3967
3968static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha,
3969 struct qla_tgt_srr_imm *imm, int ha_locked)
3970{
3971 struct qla_hw_data *ha = vha->hw;
3972 unsigned long flags = 0;
3973
3974 if (!ha_locked)
3975 spin_lock_irqsave(&ha->hardware_lock, flags);
3976
3977 qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0,
3978 NOTIFY_ACK_SRR_FLAGS_REJECT,
3979 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
3980 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
3981
3982 if (!ha_locked)
3983 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3984
3985 kfree(imm);
3986}
3987
3988static void qlt_handle_srr_work(struct work_struct *work)
3989{
3990 struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work);
3991 struct scsi_qla_host *vha = tgt->vha;
3992 struct qla_tgt_srr_ctio *sctio;
3993 unsigned long flags;
3994
3995 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n",
3996 tgt);
3997
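/*
 * srr_lock is dropped while each CTIO SRR is handled, so the list is
 * rescanned from the beginning after every processed entry.
 */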
3998restart:
3999 spin_lock_irqsave(&tgt->srr_lock, flags);
4000 list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) {
4001 struct qla_tgt_srr_imm *imm, *i, *ti;
4002 struct qla_tgt_cmd *cmd;
4003 struct se_cmd *se_cmd;
4004
4005 imm = NULL;
4006 list_for_each_entry_safe(i, ti, &tgt->srr_imm_list,
4007 srr_list_entry) {
4008 if (i->srr_id == sctio->srr_id) {
4009 list_del(&i->srr_list_entry);
4010 if (imm) {
4011 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068,
4012 "qla_target(%d): There must be "
4013 "only one IMM SRR per CTIO SRR "
 4014 "(IMM SRR %p, id %d, CTIO %p)\n",
4015 vha->vp_idx, i, i->srr_id, sctio);
4016 qlt_reject_free_srr_imm(tgt->vha, i, 0);
4017 } else
4018 imm = i;
4019 }
4020 }
4021
4022 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a,
4023 "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio,
4024 sctio->srr_id);
4025
4026 if (imm == NULL) {
4027 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b,
4028 "Not found matching IMM for SRR CTIO (id %d)\n",
4029 sctio->srr_id);
4030 continue;
4031 } else
4032 list_del(&sctio->srr_list_entry);
4033
4034 spin_unlock_irqrestore(&tgt->srr_lock, flags);
4035
4036 cmd = sctio->cmd;
4037 /*
4038 * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow
4039 * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in()
4040 * logic..
4041 */
4042 cmd->offset = 0;
4043 if (cmd->free_sg) {
4044 kfree(cmd->sg);
4045 cmd->sg = NULL;
4046 cmd->free_sg = 0;
4047 }
4048 se_cmd = &cmd->se_cmd;
4049
4050 cmd->sg_cnt = se_cmd->t_data_nents;
4051 cmd->sg = se_cmd->t_data_sg;
4052
4053 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
4054 "SRR cmd %p (se_cmd %p, tag %d, op %x), "
4055 "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag,
6f58c780
DGW
4056 se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
4057 cmd->sg_cnt, cmd->offset);
2d70c103
NB
4058
4059 qlt_handle_srr(vha, sctio, imm);
4060
4061 kfree(imm);
4062 kfree(sctio);
4063 goto restart;
4064 }
4065 spin_unlock_irqrestore(&tgt->srr_lock, flags);
4066}
4067
4068/* ha->hardware_lock supposed to be held on entry */
4069static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
4070 struct imm_ntfy_from_isp *iocb)
4071{
4072 struct qla_tgt_srr_imm *imm;
0e8cd71c 4073 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
2d70c103
NB
4074 struct qla_tgt_srr_ctio *sctio;
4075
4076 tgt->imm_srr_id++;
4077
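	/*
	 * Each immediate notify SRR gets a sequence id; the SRR work is only
	 * scheduled once a CTIO SRR with the matching id has been queued.
	 */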
e07f8f65 4078 ql_log(ql_log_warn, vha, 0xf02d, "qla_target(%d): SRR received\n",
2d70c103
NB
4079 vha->vp_idx);
4080
4081 imm = kzalloc(sizeof(*imm), GFP_ATOMIC);
4082 if (imm != NULL) {
4083 memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy));
4084
4085 /* IRQ is already OFF */
4086 spin_lock(&tgt->srr_lock);
4087 imm->srr_id = tgt->imm_srr_id;
4088 list_add_tail(&imm->srr_list_entry,
4089 &tgt->srr_imm_list);
4090 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e,
4091 "IMM NTFY SRR %p added (id %d, ui %x)\n",
4092 imm, imm->srr_id, iocb->u.isp24.srr_ui);
4093 if (tgt->imm_srr_id == tgt->ctio_srr_id) {
4094 int found = 0;
4095 list_for_each_entry(sctio, &tgt->srr_ctio_list,
4096 srr_list_entry) {
4097 if (sctio->srr_id == imm->srr_id) {
4098 found = 1;
4099 break;
4100 }
4101 }
4102 if (found) {
4103 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s",
4104 "Scheduling srr work\n");
4105 schedule_work(&tgt->srr_work);
4106 } else {
4107 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030,
4108 "qla_target(%d): imm_srr_id "
4109 "== ctio_srr_id (%d), but there is no "
4110 "corresponding SRR CTIO, deleting IMM "
4111 "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id,
4112 imm);
4113 list_del(&imm->srr_list_entry);
4114
4115 kfree(imm);
4116
4117 spin_unlock(&tgt->srr_lock);
4118 goto out_reject;
4119 }
4120 }
4121 spin_unlock(&tgt->srr_lock);
4122 } else {
4123 struct qla_tgt_srr_ctio *ts;
4124
4125 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069,
4126 "qla_target(%d): Unable to allocate SRR IMM "
4127 "entry, SRR request will be rejected\n", vha->vp_idx);
4128
4129 /* IRQ is already OFF */
4130 spin_lock(&tgt->srr_lock);
4131 list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list,
4132 srr_list_entry) {
4133 if (sctio->srr_id == tgt->imm_srr_id) {
4134 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031,
4135 "CTIO SRR %p deleted (id %d)\n",
4136 sctio, sctio->srr_id);
4137 list_del(&sctio->srr_list_entry);
4138 qlt_send_term_exchange(vha, sctio->cmd,
4139 &sctio->cmd->atio, 1);
4140 kfree(sctio);
4141 }
4142 }
4143 spin_unlock(&tgt->srr_lock);
4144 goto out_reject;
4145 }
4146
4147 return;
4148
4149out_reject:
4150 qlt_send_notify_ack(vha, iocb, 0, 0, 0,
4151 NOTIFY_ACK_SRR_FLAGS_REJECT,
4152 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
4153 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
4154}
4155
4156/*
 4157 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
4158 */
4159static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
4160 struct imm_ntfy_from_isp *iocb)
4161{
4162 struct qla_hw_data *ha = vha->hw;
4163 uint32_t add_flags = 0;
4164 int send_notify_ack = 1;
4165 uint16_t status;
4166
4167 status = le16_to_cpu(iocb->u.isp2x.status);
4168 switch (status) {
4169 case IMM_NTFY_LIP_RESET:
4170 {
4171 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
4172 "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
4173 vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
4174 iocb->u.isp24.status_subcode);
4175
4176 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
4177 send_notify_ack = 0;
4178 break;
4179 }
4180
4181 case IMM_NTFY_LIP_LINK_REINIT:
4182 {
0e8cd71c 4183 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
2d70c103
NB
4184 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
4185 "qla_target(%d): LINK REINIT (loop %#x, "
4186 "subcode %x)\n", vha->vp_idx,
4187 le16_to_cpu(iocb->u.isp24.nport_handle),
4188 iocb->u.isp24.status_subcode);
4189 if (tgt->link_reinit_iocb_pending) {
4190 qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
4191 0, 0, 0, 0, 0, 0);
4192 }
4193 memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
4194 tgt->link_reinit_iocb_pending = 1;
4195 /*
 4196 * QLogic requires waiting after LINK REINIT for possible
4197 * PDISC or ADISC ELS commands
4198 */
4199 send_notify_ack = 0;
4200 break;
4201 }
4202
4203 case IMM_NTFY_PORT_LOGOUT:
4204 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
4205 "qla_target(%d): Port logout (loop "
4206 "%#x, subcode %x)\n", vha->vp_idx,
4207 le16_to_cpu(iocb->u.isp24.nport_handle),
4208 iocb->u.isp24.status_subcode);
4209
4210 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
4211 send_notify_ack = 0;
4212 /* The sessions will be cleared in the callback, if needed */
4213 break;
4214
4215 case IMM_NTFY_GLBL_TPRLO:
4216 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
4217 "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
4218 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
4219 send_notify_ack = 0;
4220 /* The sessions will be cleared in the callback, if needed */
4221 break;
4222
4223 case IMM_NTFY_PORT_CONFIG:
4224 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
4225 "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
4226 status);
4227 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
4228 send_notify_ack = 0;
4229 /* The sessions will be cleared in the callback, if needed */
4230 break;
4231
4232 case IMM_NTFY_GLBL_LOGO:
4233 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
4234 "qla_target(%d): Link failure detected\n",
4235 vha->vp_idx);
4236 /* I_T nexus loss */
4237 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
4238 send_notify_ack = 0;
4239 break;
4240
4241 case IMM_NTFY_IOCB_OVERFLOW:
4242 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
4243 "qla_target(%d): Cannot provide requested "
4244 "capability (IOCB overflowed the immediate notify "
4245 "resource count)\n", vha->vp_idx);
4246 break;
4247
4248 case IMM_NTFY_ABORT_TASK:
4249 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
4250 "qla_target(%d): Abort Task (S %08x I %#x -> "
4251 "L %#x)\n", vha->vp_idx,
4252 le16_to_cpu(iocb->u.isp2x.seq_id),
4253 GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
4254 le16_to_cpu(iocb->u.isp2x.lun));
4255 if (qlt_abort_task(vha, iocb) == 0)
4256 send_notify_ack = 0;
4257 break;
4258
4259 case IMM_NTFY_RESOURCE:
4260 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
4261 "qla_target(%d): Out of resources, host %ld\n",
4262 vha->vp_idx, vha->host_no);
4263 break;
4264
4265 case IMM_NTFY_MSG_RX:
4266 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
4267 "qla_target(%d): Immediate notify task %x\n",
4268 vha->vp_idx, iocb->u.isp2x.task_flags);
4269 if (qlt_handle_task_mgmt(vha, iocb) == 0)
4270 send_notify_ack = 0;
4271 break;
4272
4273 case IMM_NTFY_ELS:
4274 if (qlt_24xx_handle_els(vha, iocb) == 0)
4275 send_notify_ack = 0;
4276 break;
4277
4278 case IMM_NTFY_SRR:
4279 qlt_prepare_srr_imm(vha, iocb);
4280 send_notify_ack = 0;
4281 break;
4282
4283 default:
4284 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
4285 "qla_target(%d): Received unknown immediate "
4286 "notify status %x\n", vha->vp_idx, status);
4287 break;
4288 }
4289
4290 if (send_notify_ack)
4291 qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0);
4292}
4293
4294/*
 4295 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
4296 * This function sends busy to ISP 2xxx or 24xx.
4297 */
33e79977 4298static int __qlt_send_busy(struct scsi_qla_host *vha,
2d70c103
NB
4299 struct atio_from_isp *atio, uint16_t status)
4300{
4301 struct ctio7_to_24xx *ctio24;
4302 struct qla_hw_data *ha = vha->hw;
4303 request_t *pkt;
4304 struct qla_tgt_sess *sess = NULL;
4305
4306 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
4307 atio->u.isp24.fcp_hdr.s_id);
4308 if (!sess) {
4309 qlt_send_term_exchange(vha, NULL, atio, 1);
33e79977 4310 return 0;
2d70c103
NB
4311 }
 4312 /* Sending marker isn't necessary, since we are called from ISR */
4313
4314 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
4315 if (!pkt) {
667024a3 4316 ql_dbg(ql_dbg_io, vha, 0x3063,
2d70c103
NB
4317 "qla_target(%d): %s failed: unable to allocate "
4318 "request packet", vha->vp_idx, __func__);
33e79977 4319 return -ENOMEM;
2d70c103
NB
4320 }
4321
4322 pkt->entry_count = 1;
4323 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
4324
4325 ctio24 = (struct ctio7_to_24xx *)pkt;
4326 ctio24->entry_type = CTIO_TYPE7;
4327 ctio24->nport_handle = sess->loop_id;
4328 ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
4329 ctio24->vp_index = vha->vp_idx;
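	/* Copy the initiator S_ID, reversing the FCP header byte order. */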
4330 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
4331 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
4332 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
4333 ctio24->exchange_addr = atio->u.isp24.exchange_addr;
4334 ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
4335 __constant_cpu_to_le16(
4336 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
4337 CTIO7_FLAGS_DONT_RET_CTIO);
4338 /*
4339 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
 4340 * if explicit confirmation is used.
4341 */
4342 ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
4343 ctio24->u.status1.scsi_status = cpu_to_le16(status);
63163e06
HM
4344 /* Memory Barrier */
4345 wmb();
2d70c103 4346 qla2x00_start_iocbs(vha, vha->req);
33e79977
QT
4347 return 0;
4348}
4349
4350/*
4351 * This routine is used to allocate a command for either a QFull condition
 4352 * (i.e. reply SAM_STAT_BUSY) or to terminate an exchange that did not go
4353 * out previously.
4354 */
4355static void
4356qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
4357 struct atio_from_isp *atio, uint16_t status, int qfull)
4358{
4359 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4360 struct qla_hw_data *ha = vha->hw;
4361 struct qla_tgt_sess *sess;
4362 struct se_session *se_sess;
4363 struct qla_tgt_cmd *cmd;
4364 int tag;
4365
4366 if (unlikely(tgt->tgt_stop)) {
4367 ql_dbg(ql_dbg_io, vha, 0x300a,
4368 "New command while device %p is shutting down\n", tgt);
4369 return;
4370 }
4371
4372 if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
4373 vha->hw->tgt.num_qfull_cmds_dropped++;
4374 if (vha->hw->tgt.num_qfull_cmds_dropped >
4375 vha->hw->qla_stats.stat_max_qfull_cmds_dropped)
4376 vha->hw->qla_stats.stat_max_qfull_cmds_dropped =
4377 vha->hw->tgt.num_qfull_cmds_dropped;
4378
4379 ql_dbg(ql_dbg_io, vha, 0x3068,
4380 "qla_target(%d): %s: QFull CMD dropped[%d]\n",
4381 vha->vp_idx, __func__,
4382 vha->hw->tgt.num_qfull_cmds_dropped);
4383
4384 qlt_chk_exch_leak_thresh_hold(vha);
4385 return;
4386 }
4387
4388 sess = ha->tgt.tgt_ops->find_sess_by_s_id
4389 (vha, atio->u.isp24.fcp_hdr.s_id);
4390 if (!sess)
4391 return;
4392
4393 se_sess = sess->se_sess;
4394
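	/*
	 * Borrow a pre-allocated command from the session's tag pool, the
	 * same pool the normal command path allocates from.
	 */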
4395 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
4396 if (tag < 0)
4397 return;
4398
4399 cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
4400 if (!cmd) {
4401 ql_dbg(ql_dbg_io, vha, 0x3009,
4402 "qla_target(%d): %s: Allocation of cmd failed\n",
4403 vha->vp_idx, __func__);
4404
4405 vha->hw->tgt.num_qfull_cmds_dropped++;
4406 if (vha->hw->tgt.num_qfull_cmds_dropped >
4407 vha->hw->qla_stats.stat_max_qfull_cmds_dropped)
4408 vha->hw->qla_stats.stat_max_qfull_cmds_dropped =
4409 vha->hw->tgt.num_qfull_cmds_dropped;
4410
4411 qlt_chk_exch_leak_thresh_hold(vha);
4412 return;
4413 }
4414
4415 memset(cmd, 0, sizeof(struct qla_tgt_cmd));
4416
4417 qlt_incr_num_pend_cmds(vha);
4418 INIT_LIST_HEAD(&cmd->cmd_list);
4419 memcpy(&cmd->atio, atio, sizeof(*atio));
4420
4421 cmd->tgt = vha->vha_tgt.qla_tgt;
4422 cmd->vha = vha;
4423 cmd->reset_count = vha->hw->chip_reset;
4424 cmd->q_full = 1;
4425
4426 if (qfull) {
4427 cmd->q_full = 1;
4428 /* NOTE: borrowing the state field to carry the status */
4429 cmd->state = status;
4430 } else
4431 cmd->term_exchg = 1;
4432
4433 list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);
4434
4435 vha->hw->tgt.num_qfull_cmds_alloc++;
4436 if (vha->hw->tgt.num_qfull_cmds_alloc >
4437 vha->hw->qla_stats.stat_max_qfull_cmds_alloc)
4438 vha->hw->qla_stats.stat_max_qfull_cmds_alloc =
4439 vha->hw->tgt.num_qfull_cmds_alloc;
4440}
4441
4442int
4443qlt_free_qfull_cmds(struct scsi_qla_host *vha)
4444{
4445 struct qla_hw_data *ha = vha->hw;
4446 unsigned long flags;
4447 struct qla_tgt_cmd *cmd, *tcmd;
4448 struct list_head free_list;
4449 int rc = 0;
4450
4451 if (list_empty(&ha->tgt.q_full_list))
4452 return 0;
4453
4454 INIT_LIST_HEAD(&free_list);
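	/*
	 * Move commands onto a local free_list under hardware_lock and only
	 * release them after the lock has been dropped.
	 */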
4455
4456 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
4457
4458 if (list_empty(&ha->tgt.q_full_list)) {
4459 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
4460 return 0;
4461 }
4462
4463 list_for_each_entry_safe(cmd, tcmd, &ha->tgt.q_full_list, cmd_list) {
4464 if (cmd->q_full)
4465 /* cmd->state is a borrowed field to hold status */
4466 rc = __qlt_send_busy(vha, &cmd->atio, cmd->state);
4467 else if (cmd->term_exchg)
4468 rc = __qlt_send_term_exchange(vha, NULL, &cmd->atio);
4469
4470 if (rc == -ENOMEM)
4471 break;
4472
4473 if (cmd->q_full)
4474 ql_dbg(ql_dbg_io, vha, 0x3006,
4475 "%s: busy sent for ox_id[%04x]\n", __func__,
4476 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
4477 else if (cmd->term_exchg)
4478 ql_dbg(ql_dbg_io, vha, 0x3007,
4479 "%s: Term exchg sent for ox_id[%04x]\n", __func__,
4480 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
4481 else
4482 ql_dbg(ql_dbg_io, vha, 0x3008,
4483 "%s: Unexpected cmd in QFull list %p\n", __func__,
4484 cmd);
4485
4486 list_del(&cmd->cmd_list);
4487 list_add_tail(&cmd->cmd_list, &free_list);
4488
4489 /* piggy back on hardware_lock for protection */
4490 vha->hw->tgt.num_qfull_cmds_alloc--;
4491 }
4492 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
4493
4494 cmd = NULL;
4495
4496 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
4497 list_del(&cmd->cmd_list);
4498 /* This cmd was never sent to TCM. There is no need
4499 * to schedule free or call free_cmd
4500 */
4501 qlt_free_cmd(cmd);
4502 }
4503 return rc;
4504}
4505
4506static void
4507qlt_send_busy(struct scsi_qla_host *vha,
4508 struct atio_from_isp *atio, uint16_t status)
4509{
4510 int rc = 0;
4511
4512 rc = __qlt_send_busy(vha, atio, status);
4513 if (rc == -ENOMEM)
4514 qlt_alloc_qfull_cmd(vha, atio, status, 1);
4515}
4516
4517static int
4518qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
4519 struct atio_from_isp *atio)
4520{
4521 struct qla_hw_data *ha = vha->hw;
4522 uint16_t status;
4523
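	/*
	 * Once the number of pending commands reaches the threshold, answer
	 * with BUSY (temp_sam_status) instead of queueing more work.
	 */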
4524 if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
4525 return 0;
4526
4527 status = temp_sam_status;
4528 qlt_send_busy(vha, atio, status);
4529 return 1;
2d70c103
NB
4530}
4531
4532/* ha->hardware_lock supposed to be held on entry */
4533/* called via callback from qla2xxx */
4534static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
4535 struct atio_from_isp *atio)
4536{
4537 struct qla_hw_data *ha = vha->hw;
0e8cd71c 4538 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
2d70c103
NB
4539 int rc;
4540
4541 if (unlikely(tgt == NULL)) {
667024a3 4542 ql_dbg(ql_dbg_io, vha, 0x3064,
2d70c103
NB
4543 "ATIO pkt, but no tgt (ha %p)", ha);
4544 return;
4545 }
2d70c103
NB
4546 /*
 4547 * In tgt_stop mode we should also allow all requests to pass.
 4548 * Otherwise, some commands can get stuck.
4549 */
4550
4551 tgt->irq_cmd_count++;
4552
4553 switch (atio->u.raw.entry_type) {
4554 case ATIO_TYPE7:
2d70c103
NB
4555 if (unlikely(atio->u.isp24.exchange_addr ==
4556 ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
667024a3 4557 ql_dbg(ql_dbg_io, vha, 0x3065,
2d70c103
NB
4558 "qla_target(%d): ATIO_TYPE7 "
4559 "received with UNKNOWN exchange address, "
4560 "sending QUEUE_FULL\n", vha->vp_idx);
4561 qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
4562 break;
4563 }
33e79977
QT
4564
4565
4566
4567 if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
4568 rc = qlt_chk_qfull_thresh_hold(vha, atio);
4569 if (rc != 0) {
4570 tgt->irq_cmd_count--;
4571 return;
4572 }
2d70c103 4573 rc = qlt_handle_cmd_for_atio(vha, atio);
33e79977 4574 } else {
2d70c103 4575 rc = qlt_handle_task_mgmt(vha, atio);
33e79977 4576 }
2d70c103
NB
4577 if (unlikely(rc != 0)) {
4578 if (rc == -ESRCH) {
4579#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
4580 qlt_send_busy(vha, atio, SAM_STAT_BUSY);
4581#else
4582 qlt_send_term_exchange(vha, NULL, atio, 1);
4583#endif
4584 } else {
4585 if (tgt->tgt_stop) {
4586 ql_dbg(ql_dbg_tgt, vha, 0xe059,
4587 "qla_target: Unable to send "
4588 "command to target for req, "
4589 "ignoring.\n");
4590 } else {
4591 ql_dbg(ql_dbg_tgt, vha, 0xe05a,
4592 "qla_target(%d): Unable to send "
4593 "command to target, sending BUSY "
4594 "status.\n", vha->vp_idx);
4595 qlt_send_busy(vha, atio, SAM_STAT_BUSY);
4596 }
4597 }
4598 }
4599 break;
4600
4601 case IMMED_NOTIFY_TYPE:
4602 {
4603 if (unlikely(atio->u.isp2x.entry_status != 0)) {
4604 ql_dbg(ql_dbg_tgt, vha, 0xe05b,
4605 "qla_target(%d): Received ATIO packet %x "
4606 "with error status %x\n", vha->vp_idx,
4607 atio->u.raw.entry_type,
4608 atio->u.isp2x.entry_status);
4609 break;
4610 }
4611 ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
4612 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
4613 break;
4614 }
4615
4616 default:
4617 ql_dbg(ql_dbg_tgt, vha, 0xe05c,
4618 "qla_target(%d): Received unknown ATIO atio "
4619 "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
4620 break;
4621 }
4622
4623 tgt->irq_cmd_count--;
4624}
4625
4626/* ha->hardware_lock supposed to be held on entry */
4627/* called via callback from qla2xxx */
4628static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
4629{
4630 struct qla_hw_data *ha = vha->hw;
0e8cd71c 4631 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
2d70c103
NB
4632
4633 if (unlikely(tgt == NULL)) {
4634 ql_dbg(ql_dbg_tgt, vha, 0xe05d,
4635 "qla_target(%d): Response pkt %x received, but no "
4636 "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha);
4637 return;
4638 }
4639
2d70c103
NB
4640 /*
 4641 * In tgt_stop mode we should also allow all requests to pass.
 4642 * Otherwise, some commands can get stuck.
4643 */
4644
4645 tgt->irq_cmd_count++;
4646
4647 switch (pkt->entry_type) {
f83adb61 4648 case CTIO_CRC2:
2d70c103
NB
4649 case CTIO_TYPE7:
4650 {
4651 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
2d70c103
NB
4652 qlt_do_ctio_completion(vha, entry->handle,
4653 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
4654 entry);
4655 break;
4656 }
4657
4658 case ACCEPT_TGT_IO_TYPE:
4659 {
4660 struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
4661 int rc;
2d70c103
NB
4662 if (atio->u.isp2x.status !=
4663 __constant_cpu_to_le16(ATIO_CDB_VALID)) {
4664 ql_dbg(ql_dbg_tgt, vha, 0xe05e,
4665 "qla_target(%d): ATIO with error "
4666 "status %x received\n", vha->vp_idx,
4667 le16_to_cpu(atio->u.isp2x.status));
4668 break;
4669 }
2d70c103 4670
33e79977
QT
4671 rc = qlt_chk_qfull_thresh_hold(vha, atio);
4672 if (rc != 0) {
4673 tgt->irq_cmd_count--;
4674 return;
4675 }
4676
2d70c103
NB
4677 rc = qlt_handle_cmd_for_atio(vha, atio);
4678 if (unlikely(rc != 0)) {
4679 if (rc == -ESRCH) {
4680#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
4681 qlt_send_busy(vha, atio, 0);
4682#else
4683 qlt_send_term_exchange(vha, NULL, atio, 1);
4684#endif
4685 } else {
4686 if (tgt->tgt_stop) {
4687 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
4688 "qla_target: Unable to send "
4689 "command to target, sending TERM "
4690 "EXCHANGE for rsp\n");
4691 qlt_send_term_exchange(vha, NULL,
4692 atio, 1);
4693 } else {
4694 ql_dbg(ql_dbg_tgt, vha, 0xe060,
4695 "qla_target(%d): Unable to send "
4696 "command to target, sending BUSY "
4697 "status\n", vha->vp_idx);
4698 qlt_send_busy(vha, atio, 0);
4699 }
4700 }
4701 }
4702 }
4703 break;
4704
4705 case CONTINUE_TGT_IO_TYPE:
4706 {
4707 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
2d70c103
NB
4708 qlt_do_ctio_completion(vha, entry->handle,
4709 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
4710 entry);
4711 break;
4712 }
4713
4714 case CTIO_A64_TYPE:
4715 {
4716 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
2d70c103
NB
4717 qlt_do_ctio_completion(vha, entry->handle,
4718 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
4719 entry);
4720 break;
4721 }
4722
4723 case IMMED_NOTIFY_TYPE:
4724 ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
4725 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
4726 break;
4727
4728 case NOTIFY_ACK_TYPE:
4729 if (tgt->notify_ack_expected > 0) {
4730 struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
4731 ql_dbg(ql_dbg_tgt, vha, 0xe036,
4732 "NOTIFY_ACK seq %08x status %x\n",
4733 le16_to_cpu(entry->u.isp2x.seq_id),
4734 le16_to_cpu(entry->u.isp2x.status));
4735 tgt->notify_ack_expected--;
4736 if (entry->u.isp2x.status !=
4737 __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
4738 ql_dbg(ql_dbg_tgt, vha, 0xe061,
4739 "qla_target(%d): NOTIFY_ACK "
4740 "failed %x\n", vha->vp_idx,
4741 le16_to_cpu(entry->u.isp2x.status));
4742 }
4743 } else {
4744 ql_dbg(ql_dbg_tgt, vha, 0xe062,
4745 "qla_target(%d): Unexpected NOTIFY_ACK received\n",
4746 vha->vp_idx);
4747 }
4748 break;
4749
4750 case ABTS_RECV_24XX:
4751 ql_dbg(ql_dbg_tgt, vha, 0xe037,
4752 "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
4753 qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
4754 break;
4755
4756 case ABTS_RESP_24XX:
4757 if (tgt->abts_resp_expected > 0) {
4758 struct abts_resp_from_24xx_fw *entry =
4759 (struct abts_resp_from_24xx_fw *)pkt;
4760 ql_dbg(ql_dbg_tgt, vha, 0xe038,
4761 "ABTS_RESP_24XX: compl_status %x\n",
4762 entry->compl_status);
4763 tgt->abts_resp_expected--;
4764 if (le16_to_cpu(entry->compl_status) !=
4765 ABTS_RESP_COMPL_SUCCESS) {
4766 if ((entry->error_subcode1 == 0x1E) &&
4767 (entry->error_subcode2 == 0)) {
4768 /*
4769 * We've got a race here: aborted
4770 * exchange not terminated, i.e.
4771 * response for the aborted command was
4772 * sent between the abort request was
 4773 * sent after the abort request was
 4774 * received but before it was processed.
4775 * silly requirement that all aborted
4776 * exchanges must be explicitely
 4777 * exchanges must be explicitly
4778 * send responses for the abort
4779 * requests. So, we have to
4780 * (re)terminate the exchange and retry
4781 * the abort response.
4782 */
4783 qlt_24xx_retry_term_exchange(vha,
4784 entry);
4785 } else
4786 ql_dbg(ql_dbg_tgt, vha, 0xe063,
4787 "qla_target(%d): ABTS_RESP_24XX "
4788 "failed %x (subcode %x:%x)",
4789 vha->vp_idx, entry->compl_status,
4790 entry->error_subcode1,
4791 entry->error_subcode2);
4792 }
4793 } else {
4794 ql_dbg(ql_dbg_tgt, vha, 0xe064,
4795 "qla_target(%d): Unexpected ABTS_RESP_24XX "
4796 "received\n", vha->vp_idx);
4797 }
4798 break;
4799
4800 default:
4801 ql_dbg(ql_dbg_tgt, vha, 0xe065,
4802 "qla_target(%d): Received unknown response pkt "
4803 "type %x\n", vha->vp_idx, pkt->entry_type);
4804 break;
4805 }
4806
4807 tgt->irq_cmd_count--;
4808}
4809
4810/*
 4811 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
4812 */
4813void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
4814 uint16_t *mailbox)
4815{
4816 struct qla_hw_data *ha = vha->hw;
0e8cd71c 4817 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4f1d0f19 4818 int login_code;
2d70c103 4819
2d70c103
NB
4820 if (!ha->tgt.tgt_ops)
4821 return;
4822
4823 if (unlikely(tgt == NULL)) {
4824 ql_dbg(ql_dbg_tgt, vha, 0xe03a,
4825 "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
4826 return;
4827 }
4828
4829 if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
4830 IS_QLA2100(ha))
4831 return;
4832 /*
 4833 * In tgt_stop mode we should also allow all requests to pass.
 4834 * Otherwise, some commands can get stuck.
4835 */
4836
4837 tgt->irq_cmd_count++;
4838
4839 switch (code) {
4840 case MBA_RESET: /* Reset */
4841 case MBA_SYSTEM_ERR: /* System Error */
4842 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
4843 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
4844 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
4845 "qla_target(%d): System error async event %#x "
6efb3c0a 4846 "occurred", vha->vp_idx, code);
2d70c103
NB
4847 break;
4848 case MBA_WAKEUP_THRES: /* Request Queue Wake-up. */
4849 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
4850 break;
4851
4852 case MBA_LOOP_UP:
4853 {
4854 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
6efb3c0a 4855 "qla_target(%d): Async LOOP_UP occurred "
4f1d0f19
AC
4856 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
4857 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
4858 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
2d70c103
NB
4859 if (tgt->link_reinit_iocb_pending) {
4860 qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
4861 0, 0, 0, 0, 0, 0);
4862 tgt->link_reinit_iocb_pending = 0;
4863 }
4864 break;
4865 }
4866
4867 case MBA_LIP_OCCURRED:
4868 case MBA_LOOP_DOWN:
4869 case MBA_LIP_RESET:
4870 case MBA_RSCN_UPDATE:
4871 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
6efb3c0a 4872 "qla_target(%d): Async event %#x occurred "
4f1d0f19
AC
4873 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
4874 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
4875 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
2d70c103
NB
4876 break;
4877
4878 case MBA_PORT_UPDATE:
4879 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
4880 "qla_target(%d): Port update async event %#x "
6efb3c0a 4881 "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
4f1d0f19
AC
4882 "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
4883 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
4884 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
4885
4886 login_code = le16_to_cpu(mailbox[2]);
4887 if (login_code == 0x4)
2d70c103
NB
4888 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
4889 "Async MB 2: Got PLOGI Complete\n");
4f1d0f19 4890 else if (login_code == 0x7)
2d70c103
NB
4891 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
4892 "Async MB 2: Port Logged Out\n");
4893 break;
4894
4895 default:
2d70c103
NB
4896 break;
4897 }
4898
4899 tgt->irq_cmd_count--;
4900}
4901
4902static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
4903 uint16_t loop_id)
4904{
4905 fc_port_t *fcport;
4906 int rc;
4907
4908 fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
4909 if (!fcport) {
4910 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
4911 "qla_target(%d): Allocation of tmp FC port failed",
4912 vha->vp_idx);
4913 return NULL;
4914 }
4915
2d70c103
NB
4916 fcport->loop_id = loop_id;
4917
4918 rc = qla2x00_get_port_database(vha, fcport, 0);
4919 if (rc != QLA_SUCCESS) {
4920 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
4921 "qla_target(%d): Failed to retrieve fcport "
4922 "information -- get_port_database() returned %x "
4923 "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
4924 kfree(fcport);
4925 return NULL;
4926 }
4927
4928 return fcport;
4929}
4930
4931/* Must be called under tgt_mutex */
4932static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
4933 uint8_t *s_id)
4934{
2d70c103
NB
4935 struct qla_tgt_sess *sess = NULL;
4936 fc_port_t *fcport = NULL;
4937 int rc, global_resets;
4938 uint16_t loop_id = 0;
4939
4940retry:
0e8cd71c
SK
4941 global_resets =
4942 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
2d70c103
NB
4943
4944 rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
4945 if (rc != 0) {
4946 if ((s_id[0] == 0xFF) &&
4947 (s_id[1] == 0xFC)) {
4948 /*
4949 * This is Domain Controller, so it should be
4950 * OK to drop SCSI commands from it.
4951 */
4952 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
4953 "Unable to find initiator with S_ID %x:%x:%x",
4954 s_id[0], s_id[1], s_id[2]);
4955 } else
4956 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071,
4957 "qla_target(%d): Unable to find "
4958 "initiator with S_ID %x:%x:%x",
4959 vha->vp_idx, s_id[0], s_id[1],
4960 s_id[2]);
4961 return NULL;
4962 }
4963
4964 fcport = qlt_get_port_database(vha, loop_id);
4965 if (!fcport)
4966 return NULL;
4967
4968 if (global_resets !=
0e8cd71c 4969 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
2d70c103
NB
4970 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
4971 "qla_target(%d): global reset during session discovery "
4972 "(counter was %d, new %d), retrying", vha->vp_idx,
4973 global_resets,
0e8cd71c
SK
4974 atomic_read(&vha->vha_tgt.
4975 qla_tgt->tgt_global_resets_count));
2d70c103
NB
4976 goto retry;
4977 }
4978
4979 sess = qlt_create_sess(vha, fcport, true);
4980
4981 kfree(fcport);
4982 return sess;
4983}
4984
4985static void qlt_abort_work(struct qla_tgt *tgt,
4986 struct qla_tgt_sess_work_param *prm)
4987{
4988 struct scsi_qla_host *vha = tgt->vha;
4989 struct qla_hw_data *ha = vha->hw;
4990 struct qla_tgt_sess *sess = NULL;
4991 unsigned long flags;
4992 uint32_t be_s_id;
4993 uint8_t s_id[3];
4994 int rc;
4995
4996 spin_lock_irqsave(&ha->hardware_lock, flags);
4997
4998 if (tgt->tgt_stop)
4999 goto out_term;
5000
5001 s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
5002 s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
5003 s_id[2] = prm->abts.fcp_hdr_le.s_id[0];
5004
5005 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
5006 (unsigned char *)&be_s_id);
5007 if (!sess) {
5008 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5009
0e8cd71c 5010 mutex_lock(&vha->vha_tgt.tgt_mutex);
2d70c103
NB
5011 sess = qlt_make_local_sess(vha, s_id);
5012 /* sess has got an extra creation ref */
0e8cd71c 5013 mutex_unlock(&vha->vha_tgt.tgt_mutex);
2d70c103
NB
5014
5015 spin_lock_irqsave(&ha->hardware_lock, flags);
5016 if (!sess)
5017 goto out_term;
5018 } else {
5019 kref_get(&sess->se_sess->sess_kref);
5020 }
5021
5022 if (tgt->tgt_stop)
5023 goto out_term;
5024
5025 rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
5026 if (rc != 0)
5027 goto out_term;
2d70c103
NB
5028
5029 ha->tgt.tgt_ops->put_sess(sess);
08234e3a 5030 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2d70c103
NB
5031 return;
5032
5033out_term:
5034 qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
2d70c103
NB
5035 if (sess)
5036 ha->tgt.tgt_ops->put_sess(sess);
08234e3a 5037 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2d70c103
NB
5038}
5039
5040static void qlt_tmr_work(struct qla_tgt *tgt,
5041 struct qla_tgt_sess_work_param *prm)
5042{
5043 struct atio_from_isp *a = &prm->tm_iocb2;
5044 struct scsi_qla_host *vha = tgt->vha;
5045 struct qla_hw_data *ha = vha->hw;
5046 struct qla_tgt_sess *sess = NULL;
5047 unsigned long flags;
5048 uint8_t *s_id = NULL; /* to hide compiler warnings */
5049 int rc;
5050 uint32_t lun, unpacked_lun;
5051 int lun_size, fn;
5052 void *iocb;
5053
5054 spin_lock_irqsave(&ha->hardware_lock, flags);
5055
5056 if (tgt->tgt_stop)
5057 goto out_term;
5058
5059 s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
5060 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
5061 if (!sess) {
5062 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5063
0e8cd71c 5064 mutex_lock(&vha->vha_tgt.tgt_mutex);
2d70c103
NB
5065 sess = qlt_make_local_sess(vha, s_id);
5066 /* sess has got an extra creation ref */
0e8cd71c 5067 mutex_unlock(&vha->vha_tgt.tgt_mutex);
2d70c103
NB
5068
5069 spin_lock_irqsave(&ha->hardware_lock, flags);
5070 if (!sess)
5071 goto out_term;
5072 } else {
5073 kref_get(&sess->se_sess->sess_kref);
5074 }
5075
5076 iocb = a;
5077 lun = a->u.isp24.fcp_cmnd.lun;
5078 lun_size = sizeof(lun);
5079 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
5080 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
5081
5082 rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
5083 if (rc != 0)
5084 goto out_term;
2d70c103
NB
5085
5086 ha->tgt.tgt_ops->put_sess(sess);
08234e3a 5087 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2d70c103
NB
5088 return;
5089
5090out_term:
5091 qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1);
2d70c103
NB
5092 if (sess)
5093 ha->tgt.tgt_ops->put_sess(sess);
08234e3a 5094 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2d70c103
NB
5095}
5096
5097static void qlt_sess_work_fn(struct work_struct *work)
5098{
5099 struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
5100 struct scsi_qla_host *vha = tgt->vha;
5101 unsigned long flags;
5102
5103 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);
5104
5105 spin_lock_irqsave(&tgt->sess_work_lock, flags);
5106 while (!list_empty(&tgt->sess_works_list)) {
5107 struct qla_tgt_sess_work_param *prm = list_entry(
5108 tgt->sess_works_list.next, typeof(*prm),
5109 sess_works_list_entry);
5110
5111 /*
5112 * This work can be scheduled on several CPUs at time, so we
5113 * must delete the entry to eliminate double processing
5114 */
5115 list_del(&prm->sess_works_list_entry);
5116
5117 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
5118
5119 switch (prm->type) {
5120 case QLA_TGT_SESS_WORK_ABORT:
5121 qlt_abort_work(tgt, prm);
5122 break;
5123 case QLA_TGT_SESS_WORK_TM:
5124 qlt_tmr_work(tgt, prm);
5125 break;
5126 default:
5127 BUG_ON(1);
5128 break;
5129 }
5130
5131 spin_lock_irqsave(&tgt->sess_work_lock, flags);
5132
5133 kfree(prm);
5134 }
5135 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
5136}
5137
5138/* Must be called under tgt_host_action_mutex */
5139int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
5140{
5141 struct qla_tgt *tgt;
5142
5143 if (!QLA_TGT_MODE_ENABLED())
5144 return 0;
5145
33c36c0a
AE
5146 if (!IS_TGT_MODE_CAPABLE(ha)) {
5147 ql_log(ql_log_warn, base_vha, 0xe070,
5148 "This adapter does not support target mode.\n");
5149 return 0;
5150 }
5151
2d70c103 5152 ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
0e8cd71c 5153 "Registering target for host %ld(%p).\n", base_vha->host_no, ha);
2d70c103 5154
0e8cd71c 5155 BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);
2d70c103
NB
5156
5157 tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
5158 if (!tgt) {
5159 ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
5160 "Unable to allocate struct qla_tgt\n");
5161 return -ENOMEM;
5162 }
5163
5164 if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
5165 base_vha->host->hostt->supported_mode |= MODE_TARGET;
5166
5167 tgt->ha = ha;
5168 tgt->vha = base_vha;
5169 init_waitqueue_head(&tgt->waitQ);
5170 INIT_LIST_HEAD(&tgt->sess_list);
5171 INIT_LIST_HEAD(&tgt->del_sess_list);
5172 INIT_DELAYED_WORK(&tgt->sess_del_work,
5173 (void (*)(struct work_struct *))qlt_del_sess_work_fn);
5174 spin_lock_init(&tgt->sess_work_lock);
5175 INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
5176 INIT_LIST_HEAD(&tgt->sess_works_list);
5177 spin_lock_init(&tgt->srr_lock);
5178 INIT_LIST_HEAD(&tgt->srr_ctio_list);
5179 INIT_LIST_HEAD(&tgt->srr_imm_list);
5180 INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
5181 atomic_set(&tgt->tgt_global_resets_count, 0);
5182
0e8cd71c 5183 base_vha->vha_tgt.qla_tgt = tgt;
2d70c103
NB
5184
5185 ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
5186 "qla_target(%d): using 64 Bit PCI addressing",
5187 base_vha->vp_idx);
5188 tgt->tgt_enable_64bit_addr = 1;
5189 /* 3 is reserved */
5190 tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
5191 tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
5192 tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;
5193
ddb95145
NB
5194 if (base_vha->fc_vport)
5195 return 0;
5196
2d70c103
NB
5197 mutex_lock(&qla_tgt_mutex);
5198 list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
5199 mutex_unlock(&qla_tgt_mutex);
5200
5201 return 0;
5202}
5203
5204/* Must be called under tgt_host_action_mutex */
5205int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
5206{
0e8cd71c 5207 if (!vha->vha_tgt.qla_tgt)
2d70c103
NB
5208 return 0;
5209
ddb95145
NB
5210 if (vha->fc_vport) {
5211 qlt_release(vha->vha_tgt.qla_tgt);
5212 return 0;
5213 }
33e79977
QT
5214
5215 /* free left over qfull cmds */
5216 qlt_init_term_exchange(vha);
5217
2d70c103 5218 mutex_lock(&qla_tgt_mutex);
0e8cd71c 5219 list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
2d70c103
NB
5220 mutex_unlock(&qla_tgt_mutex);
5221
5222 ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
5223 vha->host_no, ha);
0e8cd71c 5224 qlt_release(vha->vha_tgt.qla_tgt);
2d70c103
NB
5225
5226 return 0;
5227}
5228
5229static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
5230 unsigned char *b)
5231{
5232 int i;
5233
5234 pr_debug("qla2xxx HW vha->node_name: ");
5235 for (i = 0; i < WWN_SIZE; i++)
5236 pr_debug("%02x ", vha->node_name[i]);
5237 pr_debug("\n");
5238 pr_debug("qla2xxx HW vha->port_name: ");
5239 for (i = 0; i < WWN_SIZE; i++)
5240 pr_debug("%02x ", vha->port_name[i]);
5241 pr_debug("\n");
5242
5243 pr_debug("qla2xxx passed configfs WWPN: ");
5244 put_unaligned_be64(wwpn, b);
5245 for (i = 0; i < WWN_SIZE; i++)
5246 pr_debug("%02x ", b[i]);
5247 pr_debug("\n");
5248}
5249
5250/**
 5251 * qlt_lport_register - register lport with external module
5252 *
5253 * @qla_tgt_ops: Pointer for tcm_qla2xxx qla_tgt_ops
 5254 * @wwpn: Passed FC target WWPN
5255 * @callback: lport initialization callback for tcm_qla2xxx code
5256 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
5257 */
49a47f2c
NB
5258int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
5259 u64 npiv_wwpn, u64 npiv_wwnn,
5260 int (*callback)(struct scsi_qla_host *, void *, u64, u64))
2d70c103
NB
5261{
5262 struct qla_tgt *tgt;
5263 struct scsi_qla_host *vha;
5264 struct qla_hw_data *ha;
5265 struct Scsi_Host *host;
5266 unsigned long flags;
5267 int rc;
5268 u8 b[WWN_SIZE];
5269
5270 mutex_lock(&qla_tgt_mutex);
5271 list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
5272 vha = tgt->vha;
5273 ha = vha->hw;
5274
5275 host = vha->host;
5276 if (!host)
5277 continue;
5278
2d70c103
NB
5279 if (!(host->hostt->supported_mode & MODE_TARGET))
5280 continue;
5281
5282 spin_lock_irqsave(&ha->hardware_lock, flags);
49a47f2c 5283 if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
2d70c103
NB
5284 pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
5285 host->host_no);
5286 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5287 continue;
5288 }
ddb95145
NB
5289 if (tgt->tgt_stop) {
5290 pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
5291 host->host_no);
5292 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5293 continue;
5294 }
2d70c103
NB
5295 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5296
5297 if (!scsi_host_get(host)) {
5298 ql_dbg(ql_dbg_tgt, vha, 0xe068,
5299 "Unable to scsi_host_get() for"
5300 " qla2xxx scsi_host\n");
5301 continue;
5302 }
49a47f2c 5303 qlt_lport_dump(vha, phys_wwpn, b);
2d70c103
NB
5304
5305 if (memcmp(vha->port_name, b, WWN_SIZE)) {
5306 scsi_host_put(host);
5307 continue;
5308 }
49a47f2c
NB
5309 rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
5310 if (rc != 0)
5311 scsi_host_put(host);
5312
ddb95145 5313 mutex_unlock(&qla_tgt_mutex);
2d70c103
NB
5314 return rc;
5315 }
5316 mutex_unlock(&qla_tgt_mutex);
5317
5318 return -ENODEV;
5319}
5320EXPORT_SYMBOL(qlt_lport_register);
5321
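/*
 * Illustrative sketch only (not part of the driver): roughly how a fabric
 * module such as tcm_qla2xxx is expected to call qlt_lport_register() for a
 * physical port. example_lport_cb(), example_register() and lport_priv are
 * hypothetical names used purely for illustration.
 */
#if 0
static int example_lport_cb(struct scsi_qla_host *vha, void *target_lport_ptr,
	u64 npiv_wwpn, u64 npiv_wwnn)
{
	/* Stash vha in the fabric-private lport and return 0 on success. */
	return 0;
}

static int example_register(void *lport_priv, u64 phys_wwpn)
{
	/* Physical port registration: NPIV WWPN/WWNN are passed as 0. */
	return qlt_lport_register(lport_priv, phys_wwpn, 0, 0,
	    example_lport_cb);
}
#endif
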
5322/**
 5323 * qlt_lport_deregister - Deregister lport
5324 *
5325 * @vha: Registered scsi_qla_host pointer
5326 */
5327void qlt_lport_deregister(struct scsi_qla_host *vha)
5328{
5329 struct qla_hw_data *ha = vha->hw;
5330 struct Scsi_Host *sh = vha->host;
5331 /*
5332 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
5333 */
0e8cd71c 5334 vha->vha_tgt.target_lport_ptr = NULL;
2d70c103
NB
5335 ha->tgt.tgt_ops = NULL;
5336 /*
5337 * Release the Scsi_Host reference for the underlying qla2xxx host
5338 */
5339 scsi_host_put(sh);
5340}
5341EXPORT_SYMBOL(qlt_lport_deregister);
5342
5343/* Must be called under HW lock */
55a9066f 5344static void qlt_set_mode(struct scsi_qla_host *vha)
2d70c103
NB
5345{
5346 struct qla_hw_data *ha = vha->hw;
5347
5348 switch (ql2x_ini_mode) {
5349 case QLA2XXX_INI_MODE_DISABLED:
5350 case QLA2XXX_INI_MODE_EXCLUSIVE:
5351 vha->host->active_mode = MODE_TARGET;
5352 break;
5353 case QLA2XXX_INI_MODE_ENABLED:
5354 vha->host->active_mode |= MODE_TARGET;
5355 break;
5356 default:
5357 break;
5358 }
5359
5360 if (ha->tgt.ini_mode_force_reverse)
5361 qla_reverse_ini_mode(vha);
5362}
5363
5364/* Must be called under HW lock */
55a9066f 5365static void qlt_clear_mode(struct scsi_qla_host *vha)
2d70c103
NB
5366{
5367 struct qla_hw_data *ha = vha->hw;
5368
5369 switch (ql2x_ini_mode) {
5370 case QLA2XXX_INI_MODE_DISABLED:
5371 vha->host->active_mode = MODE_UNKNOWN;
5372 break;
5373 case QLA2XXX_INI_MODE_EXCLUSIVE:
5374 vha->host->active_mode = MODE_INITIATOR;
5375 break;
5376 case QLA2XXX_INI_MODE_ENABLED:
5377 vha->host->active_mode &= ~MODE_TARGET;
5378 break;
5379 default:
5380 break;
5381 }
5382
5383 if (ha->tgt.ini_mode_force_reverse)
5384 qla_reverse_ini_mode(vha);
5385}
5386
5387/*
 5388 * qlt_enable_vha - NO LOCK HELD
5389 *
5390 * host_reset, bring up w/ Target Mode Enabled
5391 */
5392void
5393qlt_enable_vha(struct scsi_qla_host *vha)
5394{
5395 struct qla_hw_data *ha = vha->hw;
0e8cd71c 5396 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
2d70c103 5397 unsigned long flags;
0e8cd71c 5398 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
2d70c103
NB
5399
5400 if (!tgt) {
5401 ql_dbg(ql_dbg_tgt, vha, 0xe069,
5402 "Unable to locate qla_tgt pointer from"
5403 " struct qla_hw_data\n");
5404 dump_stack();
5405 return;
5406 }
5407
5408 spin_lock_irqsave(&ha->hardware_lock, flags);
5409 tgt->tgt_stopped = 0;
5410 qlt_set_mode(vha);
5411 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5412
0e8cd71c
SK
5413 if (vha->vp_idx) {
5414 qla24xx_disable_vp(vha);
5415 qla24xx_enable_vp(vha);
5416 } else {
5417 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
5418 qla2xxx_wake_dpc(base_vha);
5419 qla2x00_wait_for_hba_online(base_vha);
5420 }
2d70c103
NB
5421}
5422EXPORT_SYMBOL(qlt_enable_vha);
5423
5424/*
 5425 * qlt_disable_vha - NO LOCK HELD
5426 *
5427 * Disable Target Mode and reset the adapter
5428 */
5429 static void qlt_disable_vha(struct scsi_qla_host *vha)
5430{
5431 struct qla_hw_data *ha = vha->hw;
5432 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5433 unsigned long flags;
5434
5435 if (!tgt) {
5436 ql_dbg(ql_dbg_tgt, vha, 0xe06a,
5437 "Unable to locate qla_tgt pointer from"
5438 " struct qla_hw_data\n");
5439 dump_stack();
5440 return;
5441 }
5442
5443 spin_lock_irqsave(&ha->hardware_lock, flags);
5444 qlt_clear_mode(vha);
5445 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5446
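/* Reset the adapter so it re-initializes with target mode turned off. */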
5447 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
5448 qla2xxx_wake_dpc(vha);
5449 qla2x00_wait_for_hba_online(vha);
5450}
5451
5452/*
5453 * Called from qla_init.c:qla24xx_vport_create() context to set up
5454 * the target mode specific struct scsi_qla_host and struct qla_hw_data
5455 * members.
5456 */
5457void
5458qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
5459{
5460 if (!qla_tgt_mode_enabled(vha))
5461 return;
5462
5463 vha->vha_tgt.qla_tgt = NULL;
5464
5465 mutex_init(&vha->vha_tgt.tgt_mutex);
5466 mutex_init(&vha->vha_tgt.tgt_host_action_mutex);
5467
5468 qlt_clear_mode(vha);
5469
5470 /*
5471 * NOTE: Currently the value is kept the same for <24xx and
5472 * >=24xx ISPs. If it ever needs to differ, add a check for
5473 * the specific ISP type and assign the value
5474 * appropriately.
5475 */
5476 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
5477
5478 qlt_add_target(ha, vha);
5479}
5480
5481void
5482qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
5483{
5484 /*
5485 * FC-4 Feature bit 0 indicates target functionality to the name server.
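 * Bit 1 indicates initiator functionality; both bits are reported when
 * the port runs in dual initiator/target mode.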
5486 */
5487 if (qla_tgt_mode_enabled(vha)) {
5488 if (qla_ini_mode_enabled(vha))
5489 ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
5490 else
5491 ct_req->req.rff_id.fc4_feature = BIT_0;
5492 } else if (qla_ini_mode_enabled(vha)) {
5493 ct_req->req.rff_id.fc4_feature = BIT_1;
5494 }
5495}
5496
5497/*
5498 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
5499 * @vha: HA context
5500 *
5501 * The beginning of the ATIO ring already holds the initialization
5502 * control block built by the NVRAM config routine.
5505 */
5506void
5507qlt_init_atio_q_entries(struct scsi_qla_host *vha)
5508{
5509 struct qla_hw_data *ha = vha->hw;
5510 uint16_t cnt;
5511 struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;
5512
5513 if (!qla_tgt_mode_enabled(vha))
5514 return;
5515
5516 for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
5517 pkt->u.raw.signature = ATIO_PROCESSED;
5518 pkt++;
5519 }
5520
5521}
5522
5523/*
5524 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
5525 * @vha: SCSI driver HA context
5526 */
5527void
5528qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
5529{
5530 struct qla_hw_data *ha = vha->hw;
5531 struct atio_from_isp *pkt;
5532 int cnt, i;
5533
5534 if (!vha->flags.online)
5535 return;
5536
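/*
 * Walk the ATIO ring until an entry the firmware has not yet filled in
 * is reached, handing each packet to the matching vport(s) and marking
 * the consumed entries as processed.
 */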
5537 while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
5538 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
5539 cnt = pkt->u.raw.entry_count;
5540
5541 qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt);
5542
5543 for (i = 0; i < cnt; i++) {
5544 ha->tgt.atio_ring_index++;
5545 if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
5546 ha->tgt.atio_ring_index = 0;
5547 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
5548 } else
5549 ha->tgt.atio_ring_ptr++;
5550
5551 pkt->u.raw.signature = ATIO_PROCESSED;
5552 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
5553 }
5554 wmb();
5555 }
5556
5557 /* Adjust ring index */
5558 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
5559}
5560
5561void
5562 qlt_24xx_config_rings(struct scsi_qla_host *vha)
5563{
5564 struct qla_hw_data *ha = vha->hw;
5565 if (!QLA_TGT_MODE_ENABLED())
5566 return;
5567
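/*
 * Zero the ATIO queue in/out pointers and, when the ATIO queue has its
 * own MSI-X vector, record that vector in the init control block so the
 * firmware raises ATIO interrupts on it.
 */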
5568 WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
5569 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
5570 RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));
5571
5572 if (IS_ATIO_MSIX_CAPABLE(ha)) {
5573 struct qla_msix_entry *msix = &ha->msix_entries[2];
5574 struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;
5575
5576 icb->msix_atio = cpu_to_le16(msix->entry);
5577 ql_dbg(ql_dbg_init, vha, 0xf072,
5578 "Registering ICB vector 0x%x for atio que.\n",
5579 msix->entry);
5580 }
5581}
5582
5583void
5584qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
5585{
5586 struct qla_hw_data *ha = vha->hw;
5587
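/*
 * With target mode active the firmware options are adjusted for target
 * operation; the original NVRAM-derived values are saved once so they
 * can be restored if target mode is later disabled.
 */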
5588 if (qla_tgt_mode_enabled(vha)) {
5589 if (!ha->tgt.saved_set) {
5590 /* We save only once */
5591 ha->tgt.saved_exchange_count = nv->exchange_count;
5592 ha->tgt.saved_firmware_options_1 =
5593 nv->firmware_options_1;
5594 ha->tgt.saved_firmware_options_2 =
5595 nv->firmware_options_2;
5596 ha->tgt.saved_firmware_options_3 =
5597 nv->firmware_options_3;
5598 ha->tgt.saved_set = 1;
5599 }
5600
5601 nv->exchange_count = __constant_cpu_to_le16(0xFFFF);
5602
5603 /* Enable target mode */
5604 nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);
5605
5606 /* Disable ini mode, if requested */
5607 if (!qla_ini_mode_enabled(vha))
5608 nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_5);
5609
5610 /* Disable Full Login after LIP */
5611 nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
5612 /* Enable initial LIP */
5613 nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
5614 if (ql2xtgt_tape_enable)
5615 /* Enable FC Tape support */
5616 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
5617 else
5618 /* Disable FC Tape support */
5619 nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
5620
5621 /* Disable Full Login after LIP */
5622 nv->host_p &= __constant_cpu_to_le32(~BIT_10);
5623 /* Enable target PRLI control */
5624 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
5625 } else {
5626 if (ha->tgt.saved_set) {
5627 nv->exchange_count = ha->tgt.saved_exchange_count;
5628 nv->firmware_options_1 =
5629 ha->tgt.saved_firmware_options_1;
5630 nv->firmware_options_2 =
5631 ha->tgt.saved_firmware_options_2;
5632 nv->firmware_options_3 =
5633 ha->tgt.saved_firmware_options_3;
5634 }
5635 return;
5636 }
5637
5638 /* out-of-order frames reassembly */
5639 nv->firmware_options_3 |= BIT_6|BIT_9;
5640
5641 if (ha->tgt.enable_class_2) {
5642 if (vha->flags.init_done)
5643 fc_host_supported_classes(vha->host) =
5644 FC_COS_CLASS2 | FC_COS_CLASS3;
5645
5646 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
5647 } else {
5648 if (vha->flags.init_done)
5649 fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
5650
5651 nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
5652 }
5653}
5654
5655void
5656qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
5657 struct init_cb_24xx *icb)
5658{
5659 struct qla_hw_data *ha = vha->hw;
5660
5661 if (ha->tgt.node_name_set) {
5662 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
5663 icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
5664 }
5665}
5666
5667void
5668qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
5669{
5670 struct qla_hw_data *ha = vha->hw;
5671
5672 if (!QLA_TGT_MODE_ENABLED())
5673 return;
5674
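/* Same firmware-option save/adjust/restore handling as the 24xx variant. */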
5675 if (qla_tgt_mode_enabled(vha)) {
5676 if (!ha->tgt.saved_set) {
5677 /* We save only once */
5678 ha->tgt.saved_exchange_count = nv->exchange_count;
5679 ha->tgt.saved_firmware_options_1 =
5680 nv->firmware_options_1;
5681 ha->tgt.saved_firmware_options_2 =
5682 nv->firmware_options_2;
5683 ha->tgt.saved_firmware_options_3 =
5684 nv->firmware_options_3;
5685 ha->tgt.saved_set = 1;
5686 }
5687
5688 nv->exchange_count = __constant_cpu_to_le16(0xFFFF);
5689
5690 /* Enable target mode */
5691 nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);
5692
5693 /* Disable ini mode, if requested */
5694 if (!qla_ini_mode_enabled(vha))
5695 nv->firmware_options_1 |=
5696 __constant_cpu_to_le32(BIT_5);
5697
5698 /* Disable Full Login after LIP */
5699 nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
5700 /* Enable initial LIP */
5701 nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
5702 if (ql2xtgt_tape_enable)
5703 /* Enable FC tape support */
5704 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
5705 else
5706 /* Disable FC tape support */
5707 nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
5708
5709 /* Disable Full Login after LIP */
5710 nv->host_p &= __constant_cpu_to_le32(~BIT_10);
5711 /* Enable target PRLI control */
5712 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
5713 } else {
5714 if (ha->tgt.saved_set) {
5715 nv->exchange_count = ha->tgt.saved_exchange_count;
5716 nv->firmware_options_1 =
5717 ha->tgt.saved_firmware_options_1;
5718 nv->firmware_options_2 =
5719 ha->tgt.saved_firmware_options_2;
5720 nv->firmware_options_3 =
5721 ha->tgt.saved_firmware_options_3;
5722 }
5723 return;
5724 }
5725
5726 /* out-of-order frames reassembly */
5727 nv->firmware_options_3 |= BIT_6|BIT_9;
5728
5729 if (ha->tgt.enable_class_2) {
5730 if (vha->flags.init_done)
5731 fc_host_supported_classes(vha->host) =
5732 FC_COS_CLASS2 | FC_COS_CLASS3;
5733
5734 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
5735 } else {
5736 if (vha->flags.init_done)
5737 fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
5738
5739 nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
5740 }
5741}
5742
5743void
5744qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
5745 struct init_cb_81xx *icb)
5746{
5747 struct qla_hw_data *ha = vha->hw;
5748
5749 if (!QLA_TGT_MODE_ENABLED())
5750 return;
5751
5752 if (ha->tgt.node_name_set) {
5753 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
5754 icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
5755 }
5756}
5757
5758void
5759qlt_83xx_iospace_config(struct qla_hw_data *ha)
5760{
5761 if (!QLA_TGT_MODE_ENABLED())
5762 return;
5763
5764 ha->msix_count += 1; /* For ATIO Q */
5765}
5766
5767int
5768qlt_24xx_process_response_error(struct scsi_qla_host *vha,
5769 struct sts_entry_24xx *pkt)
5770{
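/* Returns 1 for IOCB entry types owned by target mode, 0 otherwise. */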
5771 switch (pkt->entry_type) {
5772 case ABTS_RECV_24XX:
5773 case ABTS_RESP_24XX:
5774 case CTIO_TYPE7:
5775 case NOTIFY_ACK_TYPE:
5776 case CTIO_CRC2:
5777 return 1;
5778 default:
5779 return 0;
5780 }
5781}
5782
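/*
 * Adjust the VP config IOCB option bits according to the configured
 * target/initiator mode for this vport.
 */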
5783void
5784qlt_modify_vp_config(struct scsi_qla_host *vha,
5785 struct vp_config_entry_24xx *vpmod)
5786{
5787 if (qla_tgt_mode_enabled(vha))
5788 vpmod->options_idx1 &= ~BIT_5;
5789 /* Disable ini mode, if requested */
5790 if (!qla_ini_mode_enabled(vha))
5791 vpmod->options_idx1 &= ~BIT_4;
5792}
5793
5794void
5795qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
5796{
5797 if (!QLA_TGT_MODE_ENABLED())
5798 return;
5799
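/*
 * Point the ATIO queue register accessors at the multiqueue I/O base for
 * MQ-enabled/83xx adapters, otherwise at the legacy 24xx registers.
 */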
5800 if (ha->mqenable || IS_QLA83XX(ha)) {
5801 ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
5802 ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
5803 } else {
5804 ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
5805 ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
5806 }
5807
5808 mutex_init(&base_vha->vha_tgt.tgt_mutex);
5809 mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
5810 qlt_clear_mode(base_vha);
5811}
5812
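/*
 * MSI-X handler for the dedicated ATIO queue vector on ISP83xx: drains
 * the ATIO ring and the associated response queue under the hardware lock.
 */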
5813irqreturn_t
5814qla83xx_msix_atio_q(int irq, void *dev_id)
5815{
5816 struct rsp_que *rsp;
5817 scsi_qla_host_t *vha;
5818 struct qla_hw_data *ha;
5819 unsigned long flags;
5820
5821 rsp = (struct rsp_que *) dev_id;
5822 ha = rsp->hw;
5823 vha = pci_get_drvdata(ha->pdev);
5824
5825 spin_lock_irqsave(&ha->hardware_lock, flags);
5826
5827 qlt_24xx_process_atio_queue(vha);
5828 qla24xx_process_response_queue(vha, rsp);
5829
5830 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5831
5832 return IRQ_HANDLED;
5833}
5834
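/*
 * Allocate the vport lookup map and the DMA-coherent ATIO ring used by
 * target mode; qlt_mem_free() releases both.
 */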
5835int
5836qlt_mem_alloc(struct qla_hw_data *ha)
5837{
5838 if (!QLA_TGT_MODE_ENABLED())
5839 return 0;
5840
5841 ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
5842 MAX_MULTI_ID_FABRIC, GFP_KERNEL);
5843 if (!ha->tgt.tgt_vp_map)
5844 return -ENOMEM;
5845
5846 ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
5847 (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
5848 &ha->tgt.atio_dma, GFP_KERNEL);
5849 if (!ha->tgt.atio_ring) {
5850 kfree(ha->tgt.tgt_vp_map);
5851 return -ENOMEM;
5852 }
5853 return 0;
5854}
5855
5856void
5857qlt_mem_free(struct qla_hw_data *ha)
5858{
5859 if (!QLA_TGT_MODE_ENABLED())
5860 return;
5861
5862 if (ha->tgt.atio_ring) {
5863 dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
5864 sizeof(struct atio_from_isp), ha->tgt.atio_ring,
5865 ha->tgt.atio_dma);
5866 }
5867 kfree(ha->tgt.tgt_vp_map);
5868}
5869
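/*
 * Keep the vp_idx and AL_PA lookup tables current as vports are created,
 * assigned an address, or torn down.
 */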
5870/* vport_slock to be held by the caller */
5871void
5872qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
5873{
5874 if (!QLA_TGT_MODE_ENABLED())
5875 return;
5876
5877 switch (cmd) {
5878 case SET_VP_IDX:
5879 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
5880 break;
5881 case SET_AL_PA:
5882 vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
5883 break;
5884 case RESET_VP_IDX:
5885 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
5886 break;
5887 case RESET_AL_PA:
5888 vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
5889 break;
5890 }
5891}
5892
5893static int __init qlt_parse_ini_mode(void)
5894{
5895 if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
5896 ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
5897 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
5898 ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
5899 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
5900 ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
5901 else
5902 return false;
5903
5904 return true;
5905}
5906
5907int __init qlt_init(void)
5908{
5909 int ret;
5910
5911 if (!qlt_parse_ini_mode()) {
5912 ql_log(ql_log_fatal, NULL, 0xe06b,
5913 "qlt_parse_ini_mode() failed\n");
5914 return -EINVAL;
5915 }
5916
5917 if (!QLA_TGT_MODE_ENABLED())
5918 return 0;
5919
5920 qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
5921 sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
5922 qla_tgt_mgmt_cmd), 0, NULL);
5923 if (!qla_tgt_mgmt_cmd_cachep) {
5924 ql_log(ql_log_fatal, NULL, 0xe06d,
5925 "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
5926 return -ENOMEM;
5927 }
5928
5929 qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
5930 mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
5931 if (!qla_tgt_mgmt_cmd_mempool) {
5932 ql_log(ql_log_fatal, NULL, 0xe06e,
5933 "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
5934 ret = -ENOMEM;
5935 goto out_mgmt_cmd_cachep;
5936 }
5937
5938 qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
5939 if (!qla_tgt_wq) {
5940 ql_log(ql_log_fatal, NULL, 0xe06f,
5941 "alloc_workqueue for qla_tgt_wq failed\n");
5942 ret = -ENOMEM;
5943 goto out_cmd_mempool;
5944 }
5945 /*
5946 * Return 1 to signal that initiator-mode is being disabled
5947 */
5948 return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;
5949
5950out_cmd_mempool:
5951 mempool_destroy(qla_tgt_mgmt_cmd_mempool);
5952out_mgmt_cmd_cachep:
5953 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
5954 return ret;
5955}
5956
5957void qlt_exit(void)
5958{
5959 if (!QLA_TGT_MODE_ENABLED())
5960 return;
5961
5962 destroy_workqueue(qla_tgt_wq);
5963 mempool_destroy(qla_tgt_mgmt_cmd_mempool);
5964 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
5965 }