/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>
static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
    uint16_t cflags;
    struct scsi_cmnd *cmd = GET_CMD_SP(sp);
    struct scsi_qla_host *vha = sp->fcport->vha;

    cflags = 0;

    /* Set transfer direction */
    if (cmd->sc_data_direction == DMA_TO_DEVICE) {
        cflags = CF_WRITE;
        vha->qla_stats.output_bytes += scsi_bufflen(cmd);
        vha->qla_stats.output_requests++;
    } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
        cflags = CF_READ;
        vha->qla_stats.input_bytes += scsi_bufflen(cmd);
        vha->qla_stats.input_requests++;
    }
    return (cflags);
}
/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
    uint16_t iocbs;

    iocbs = 1;
    if (dsds > 3) {
        iocbs += (dsds - 3) / 7;
        if ((dsds - 3) % 7)
            iocbs++;
    }
    return (iocbs);
}
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
    uint16_t iocbs;

    iocbs = 1;
    if (dsds > 2) {
        iocbs += (dsds - 2) / 5;
        if ((dsds - 2) % 5)
            iocbs++;
    }
    return (iocbs);
}
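/*
 * Worked example (illustrative, not part of the original source): the first
 * 32-bit Command Type 2 IOCB carries up to 3 DSDs and each Continuation
 * Type 0 IOCB carries 7 more, so dsds = 17 needs 1 + (17 - 3) / 7 = 3 IOCBs
 * with no remainder. For the 64-bit variant (2 DSDs in the command, then 5
 * per continuation), dsds = 17 needs 1 + (17 - 2) / 5 = 4 IOCBs.
 */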
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
    cont_entry_t *cont_pkt;
    struct req_que *req = vha->req;

    /* Adjust ring index. */
    req->ring_index++;
    if (req->ring_index == req->length) {
        req->ring_index = 0;
        req->ring_ptr = req->ring;
    } else {
        req->ring_ptr++;
    }

    cont_pkt = (cont_entry_t *)req->ring_ptr;

    /* Load packet defaults. */
    *((uint32_t *)(&cont_pkt->entry_type)) =
        __constant_cpu_to_le32(CONTINUE_TYPE);

    return (cont_pkt);
}
/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
    cont_a64_entry_t *cont_pkt;

    /* Adjust ring index. */
    req->ring_index++;
    if (req->ring_index == req->length) {
        req->ring_index = 0;
        req->ring_ptr = req->ring;
    } else {
        req->ring_ptr++;
    }

    cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

    /* Load packet defaults. */
    *((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
        __constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
        __constant_cpu_to_le32(CONTINUE_A64_TYPE);

    return (cont_pkt);
}
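/*
 * Illustrative note (example values assumed, not from this file): both
 * helpers above advance the circular request ring the same way. With
 * req->length = 2048, stepping from ring_index 2047 wraps ring_index back
 * to 0 and rewinds ring_ptr to req->ring; every other step simply moves
 * ring_ptr to the next request entry.
 */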
inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
    struct scsi_cmnd *cmd = GET_CMD_SP(sp);
    uint8_t guard = scsi_host_get_guard(cmd->device->host);

    /* We always use DIFF Bundling for best performance */
    *fw_prot_opts = 0;

    /* Translate SCSI opcode to a protection opcode */
    switch (scsi_get_prot_op(cmd)) {
    case SCSI_PROT_READ_STRIP:
        *fw_prot_opts |= PO_MODE_DIF_REMOVE;
        break;
    case SCSI_PROT_WRITE_INSERT:
        *fw_prot_opts |= PO_MODE_DIF_INSERT;
        break;
    case SCSI_PROT_READ_INSERT:
        *fw_prot_opts |= PO_MODE_DIF_INSERT;
        break;
    case SCSI_PROT_WRITE_STRIP:
        *fw_prot_opts |= PO_MODE_DIF_REMOVE;
        break;
    case SCSI_PROT_READ_PASS:
    case SCSI_PROT_WRITE_PASS:
        if (guard & SHOST_DIX_GUARD_IP)
            *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
        else
            *fw_prot_opts |= PO_MODE_DIF_PASS;
        break;
    default:    /* Normal Request */
        *fw_prot_opts |= PO_MODE_DIF_PASS;
        break;
    }

    return scsi_prot_sg_count(cmd);
}
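/*
 * Example (illustrative): for a SCSI_PROT_WRITE_PASS command on a host whose
 * DIX guard type is SHOST_DIX_GUARD_IP, the switch above selects
 * PO_MODE_DIF_TCP_CKSUM; a SCSI_PROT_READ_STRIP command instead selects
 * PO_MODE_DIF_REMOVE, telling the firmware to strip protection data on the
 * inbound path.
 */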
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
    uint16_t avail_dsds;
    uint32_t *cur_dsd;
    scsi_qla_host_t *vha;
    struct scsi_cmnd *cmd;
    struct scatterlist *sg;
    int i;

    cmd = GET_CMD_SP(sp);

    /* Update entry type to indicate Command Type 2 IOCB */
    *((uint32_t *)(&cmd_pkt->entry_type)) =
        __constant_cpu_to_le32(COMMAND_TYPE);

    /* No data transfer */
    if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
        cmd_pkt->byte_count = __constant_cpu_to_le32(0);
        return;
    }

    vha = sp->fcport->vha;
    cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

    /* Three DSDs are available in the Command Type 2 IOCB */
    avail_dsds = 3;
    cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

    /* Load data segments */
    scsi_for_each_sg(cmd, sg, tot_dsds, i) {
        cont_entry_t *cont_pkt;

        /* Allocate additional continuation packets? */
        if (avail_dsds == 0) {
            /*
             * Seven DSDs are available in the Continuation
             * Type 0 IOCB.
             */
            cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
            cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
            avail_dsds = 7;
        }

        *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
        *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
        avail_dsds--;
    }
}
/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
    uint16_t avail_dsds;
    uint32_t *cur_dsd;
    scsi_qla_host_t *vha;
    struct scsi_cmnd *cmd;
    struct scatterlist *sg;
    int i;

    cmd = GET_CMD_SP(sp);

    /* Update entry type to indicate Command Type 3 IOCB */
    *((uint32_t *)(&cmd_pkt->entry_type)) =
        __constant_cpu_to_le32(COMMAND_A64_TYPE);

    /* No data transfer */
    if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
        cmd_pkt->byte_count = __constant_cpu_to_le32(0);
        return;
    }

    vha = sp->fcport->vha;
    cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

    /* Two DSDs are available in the Command Type 3 IOCB */
    avail_dsds = 2;
    cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

    /* Load data segments */
    scsi_for_each_sg(cmd, sg, tot_dsds, i) {
        dma_addr_t sle_dma;
        cont_a64_entry_t *cont_pkt;

        /* Allocate additional continuation packets? */
        if (avail_dsds == 0) {
            /*
             * Five DSDs are available in the Continuation
             * Type 1 IOCB.
             */
            cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
            cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
            avail_dsds = 5;
        }

        sle_dma = sg_dma_address(sg);
        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
        avail_dsds--;
    }
}
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
    int nseg;
    unsigned long flags;
    scsi_qla_host_t *vha;
    struct scsi_cmnd *cmd;
    uint32_t *clr_ptr;
    uint32_t index;
    uint32_t handle;
    cmd_entry_t *cmd_pkt;
    uint16_t cnt;
    uint16_t req_cnt;
    uint16_t tot_dsds;
    struct device_reg_2xxx __iomem *reg;
    struct qla_hw_data *ha;
    struct req_que *req;
    struct rsp_que *rsp;
    char tag[2];

    /* Setup device pointers. */
    vha = sp->fcport->vha;
    ha = vha->hw;
    reg = &ha->iobase->isp;
    cmd = GET_CMD_SP(sp);
    req = ha->req_q_map[0];
    rsp = ha->rsp_q_map[0];
    /* So we know we haven't pci_map'ed anything yet */
    tot_dsds = 0;

    /* Send marker if required */
    if (vha->marker_needed != 0) {
        if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
            QLA_SUCCESS) {
            return (QLA_FUNCTION_FAILED);
        }
        vha->marker_needed = 0;
    }

    /* Acquire ring specific lock */
    spin_lock_irqsave(&ha->hardware_lock, flags);

    /* Check for room in outstanding command list. */
    handle = req->current_outstanding_cmd;
    for (index = 1; index < req->num_outstanding_cmds; index++) {
        handle++;
        if (handle == req->num_outstanding_cmds)
            handle = 1;
        if (!req->outstanding_cmds[handle])
            break;
    }
    if (index == req->num_outstanding_cmds)
        goto queuing_error;

    /* Map the sg table so we have an accurate count of sg entries needed */
    if (scsi_sg_count(cmd)) {
        nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
            scsi_sg_count(cmd), cmd->sc_data_direction);
        if (unlikely(!nseg))
            goto queuing_error;
    } else
        nseg = 0;

    tot_dsds = nseg;

    /* Calculate the number of request entries needed. */
    req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
    if (req->cnt < (req_cnt + 2)) {
        cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
        if (req->ring_index < cnt)
            req->cnt = cnt - req->ring_index;
        else
            req->cnt = req->length -
                (req->ring_index - cnt);
        /* If still no head room then bail out */
        if (req->cnt < (req_cnt + 2))
            goto queuing_error;
    }

    /* Build command packet */
    req->current_outstanding_cmd = handle;
    req->outstanding_cmds[handle] = sp;
    sp->handle = handle;
    cmd->host_scribble = (unsigned char *)(unsigned long)handle;
    req->cnt -= req_cnt;

    cmd_pkt = (cmd_entry_t *)req->ring_ptr;
    cmd_pkt->handle = handle;
    /* Zero out remaining portion of packet. */
    clr_ptr = (uint32_t *)cmd_pkt + 2;
    memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
    cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

    /* Set target ID and LUN number*/
    SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
    cmd_pkt->lun = cpu_to_le16(cmd->device->lun);

    /* Update tagged queuing modifier */
    if (scsi_populate_tag_msg(cmd, tag)) {
        switch (tag[0]) {
        case HEAD_OF_QUEUE_TAG:
            cmd_pkt->control_flags =
                __constant_cpu_to_le16(CF_HEAD_TAG);
            break;
        case ORDERED_QUEUE_TAG:
            cmd_pkt->control_flags =
                __constant_cpu_to_le16(CF_ORDERED_TAG);
            break;
        default:
            cmd_pkt->control_flags =
                __constant_cpu_to_le16(CF_SIMPLE_TAG);
            break;
        }
    } else {
        cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
    }

    /* Load SCSI command packet. */
    memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
    cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

    /* Build IOCB segments */
    ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

    /* Set total data segment count. */
    cmd_pkt->entry_count = (uint8_t)req_cnt;
    wmb();

    /* Adjust ring index. */
    req->ring_index++;
    if (req->ring_index == req->length) {
        req->ring_index = 0;
        req->ring_ptr = req->ring;
    } else
        req->ring_ptr++;

    sp->flags |= SRB_DMA_VALID;

    /* Set chip new ring index. */
    WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
    RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */

    /* Manage unprocessed RIO/ZIO commands in response queue. */
    if (vha->flags.process_response_queue &&
        rsp->ring_ptr->signature != RESPONSE_PROCESSED)
        qla2x00_process_response_queue(rsp);

    spin_unlock_irqrestore(&ha->hardware_lock, flags);
    return (QLA_SUCCESS);

queuing_error:
    if (tot_dsds)
        scsi_dma_unmap(cmd);

    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    return (QLA_FUNCTION_FAILED);
}
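/*
 * Worked example of the ring-room arithmetic above (illustrative values):
 * with req->length = 1024, ring_index = 1000 and a hardware out pointer
 * cnt = 10, ring_index >= cnt, so the free count becomes
 * req->cnt = 1024 - (1000 - 10) = 34 entries. The command is queued only if
 * that leaves req_cnt + 2 slots, preserving a safety gap between the
 * driver's in pointer and the ISP's out pointer.
 */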
/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
    struct qla_hw_data *ha = vha->hw;
    device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);

    if (IS_P3P_TYPE(ha)) {
        qla82xx_start_iocbs(vha);
    } else {
        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
            req->ring_index = 0;
            req->ring_ptr = req->ring;
        } else
            req->ring_ptr++;

        /* Set chip new ring index. */
        if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
            WRT_REG_DWORD(req->req_q_in, req->ring_index);
            RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
        } else if (IS_QLAFX00(ha)) {
            WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
            RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
            QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
        } else if (IS_FWI2_CAPABLE(ha)) {
            WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
            RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
        } else {
            WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
                req->ring_index);
            RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
        }
    }
}
/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id,
    uint16_t lun, uint8_t type)
{
    mrk_entry_t *mrk;
    struct mrk_entry_24xx *mrk24 = NULL;

    struct qla_hw_data *ha = vha->hw;
    scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

    req = ha->req_q_map[0];
    mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
    if (mrk == NULL) {
        ql_log(ql_log_warn, base_vha, 0x3026,
            "Failed to allocate Marker IOCB.\n");

        return (QLA_FUNCTION_FAILED);
    }

    mrk->entry_type = MARKER_TYPE;
    mrk->modifier = type;
    if (type != MK_SYNC_ALL) {
        if (IS_FWI2_CAPABLE(ha)) {
            mrk24 = (struct mrk_entry_24xx *) mrk;
            mrk24->nport_handle = cpu_to_le16(loop_id);
            mrk24->lun[1] = LSB(lun);
            mrk24->lun[2] = MSB(lun);
            host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
            mrk24->vp_index = vha->vp_idx;
            mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
        } else {
            SET_TARGET_ID(ha, mrk->target, loop_id);
            mrk->lun = cpu_to_le16(lun);
        }
    }
    wmb();

    qla2x00_start_iocbs(vha, req);

    return (QLA_SUCCESS);
}
int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
    int ret;
    unsigned long flags = 0;

    spin_lock_irqsave(&vha->hw->hardware_lock, flags);
    ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
    spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

    return (ret);
}
/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
    if (ha_locked) {
        if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
            MK_SYNC_ALL) != QLA_SUCCESS)
            return QLA_FUNCTION_FAILED;
    } else {
        if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
            MK_SYNC_ALL) != QLA_SUCCESS)
            return QLA_FUNCTION_FAILED;
    }
    vha->marker_needed = 0;

    return QLA_SUCCESS;
}
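/*
 * Usage sketch (assumption, not from this file): a reset path that already
 * holds hardware_lock would call qla2x00_issue_marker(vha, 1) so the
 * variant that expects the lock to be held (__qla2x00_marker()) is used,
 * while plain process context passes ha_locked = 0 and lets
 * qla2x00_marker() take and release the lock itself.
 */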
static int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
    uint16_t tot_dsds)
{
    uint32_t *cur_dsd = NULL;
    scsi_qla_host_t *vha;
    struct qla_hw_data *ha;
    struct scsi_cmnd *cmd;
    struct scatterlist *cur_seg;
    uint32_t *dsd_seg;
    void *next_dsd;
    uint8_t avail_dsds;
    uint8_t first_iocb = 1;
    uint32_t dsd_list_len;
    struct dsd_dma *dsd_ptr;
    struct ct6_dsd *ctx;

    cmd = GET_CMD_SP(sp);

    /* Update entry type to indicate Command Type 3 IOCB */
    *((uint32_t *)(&cmd_pkt->entry_type)) =
        __constant_cpu_to_le32(COMMAND_TYPE_6);

    /* No data transfer */
    if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
        cmd_pkt->byte_count = __constant_cpu_to_le32(0);
        return 0;
    }

    vha = sp->fcport->vha;
    ha = vha->hw;

    /* Set transfer direction */
    if (cmd->sc_data_direction == DMA_TO_DEVICE) {
        cmd_pkt->control_flags =
            __constant_cpu_to_le16(CF_WRITE_DATA);
        vha->qla_stats.output_bytes += scsi_bufflen(cmd);
        vha->qla_stats.output_requests++;
    } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
        cmd_pkt->control_flags =
            __constant_cpu_to_le16(CF_READ_DATA);
        vha->qla_stats.input_bytes += scsi_bufflen(cmd);
        vha->qla_stats.input_requests++;
    }

    cur_seg = scsi_sglist(cmd);
    ctx = GET_CMD_CTX_SP(sp);

    while (tot_dsds) {
        avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
            QLA_DSDS_PER_IOCB : tot_dsds;
        tot_dsds -= avail_dsds;
        dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

        dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
            struct dsd_dma, list);
        next_dsd = dsd_ptr->dsd_addr;
        list_del(&dsd_ptr->list);
        ha->gbl_dsd_avail--;
        list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
        ctx->dsd_use_cnt++;
        ha->gbl_dsd_inuse++;

        if (first_iocb) {
            first_iocb = 0;
            dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
            *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
            *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
            cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
        } else {
            *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
            *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
            *cur_dsd++ = cpu_to_le32(dsd_list_len);
        }
        cur_dsd = (uint32_t *)next_dsd;
        while (avail_dsds) {
            dma_addr_t sle_dma;

            sle_dma = sg_dma_address(cur_seg);
            *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
            *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
            *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
            cur_seg = sg_next(cur_seg);
            avail_dsds--;
        }
    }

    /* Null termination */
    *cur_dsd++ = 0;
    *cur_dsd++ = 0;
    *cur_dsd++ = 0;
    cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
    return 0;
}
/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD list required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of dsd list needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
    uint16_t dsd_lists = 0;

    dsd_lists = (dsds / QLA_DSDS_PER_IOCB);
    if (dsds % QLA_DSDS_PER_IOCB)
        dsd_lists++;
    return dsd_lists;
}
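/*
 * Example (illustrative; assumes QLA_DSDS_PER_IOCB is 37 as in qla_def.h):
 * this is a ceiling division, so a command needing 100 DSDs gets
 * 100 / 37 = 2 full lists plus one more for the 26-entry remainder,
 * i.e. 3 DSD lists in total.
 */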
/*
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
    uint16_t avail_dsds;
    uint32_t *cur_dsd;
    scsi_qla_host_t *vha;
    struct scsi_cmnd *cmd;
    struct scatterlist *sg;
    int i;

    cmd = GET_CMD_SP(sp);

    /* Update entry type to indicate Command Type 3 IOCB */
    *((uint32_t *)(&cmd_pkt->entry_type)) =
        __constant_cpu_to_le32(COMMAND_TYPE_7);

    /* No data transfer */
    if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
        cmd_pkt->byte_count = __constant_cpu_to_le32(0);
        return;
    }

    vha = sp->fcport->vha;

    /* Set transfer direction */
    if (cmd->sc_data_direction == DMA_TO_DEVICE) {
        cmd_pkt->task_mgmt_flags =
            __constant_cpu_to_le16(TMF_WRITE_DATA);
        vha->qla_stats.output_bytes += scsi_bufflen(cmd);
        vha->qla_stats.output_requests++;
    } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
        cmd_pkt->task_mgmt_flags =
            __constant_cpu_to_le16(TMF_READ_DATA);
        vha->qla_stats.input_bytes += scsi_bufflen(cmd);
        vha->qla_stats.input_requests++;
    }

    /* One DSD is available in the Command Type 3 IOCB */
    avail_dsds = 1;
    cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

    /* Load data segments */
    scsi_for_each_sg(cmd, sg, tot_dsds, i) {
        dma_addr_t sle_dma;
        cont_a64_entry_t *cont_pkt;

        /* Allocate additional continuation packets? */
        if (avail_dsds == 0) {
            /*
             * Five DSDs are available in the Continuation
             * Type 1 IOCB.
             */
            cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
            cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
            avail_dsds = 5;
        }

        sle_dma = sg_dma_address(sg);
        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
        avail_dsds--;
    }
}
struct fw_dif_context {
    uint32_t ref_tag;
    uint16_t app_tag;
    uint8_t ref_tag_mask[4];    /* Validation/Replacement Mask*/
    uint8_t app_tag_mask[2];    /* Validation/Replacement Mask*/
};
/*
 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
    struct scsi_cmnd *cmd = GET_CMD_SP(sp);

    switch (scsi_get_prot_type(cmd)) {
    case SCSI_PROT_DIF_TYPE0:
        /*
         * No check for ql2xenablehba_err_chk, as it would be an
         * I/O error if hba tag generation is not done.
         */
        pkt->ref_tag = cpu_to_le32((uint32_t)
            (0xffffffff & scsi_get_lba(cmd)));

        if (!qla2x00_hba_err_chk_enabled(sp))
            break;

        pkt->ref_tag_mask[0] = 0xff;
        pkt->ref_tag_mask[1] = 0xff;
        pkt->ref_tag_mask[2] = 0xff;
        pkt->ref_tag_mask[3] = 0xff;
        break;

    /*
     * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
     * match LBA in CDB + N
     */
    case SCSI_PROT_DIF_TYPE2:
        pkt->app_tag = __constant_cpu_to_le16(0);
        pkt->app_tag_mask[0] = 0x0;
        pkt->app_tag_mask[1] = 0x0;

        pkt->ref_tag = cpu_to_le32((uint32_t)
            (0xffffffff & scsi_get_lba(cmd)));

        if (!qla2x00_hba_err_chk_enabled(sp))
            break;

        /* enable ALL bytes of the ref tag */
        pkt->ref_tag_mask[0] = 0xff;
        pkt->ref_tag_mask[1] = 0xff;
        pkt->ref_tag_mask[2] = 0xff;
        pkt->ref_tag_mask[3] = 0xff;
        break;

    /* For Type 3 protection: 16 bit GUARD only */
    case SCSI_PROT_DIF_TYPE3:
        pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
            pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
                0x00;
        break;

    /*
     * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
     * 16 bit app tag.
     */
    case SCSI_PROT_DIF_TYPE1:
        pkt->ref_tag = cpu_to_le32((uint32_t)
            (0xffffffff & scsi_get_lba(cmd)));
        pkt->app_tag = __constant_cpu_to_le16(0);
        pkt->app_tag_mask[0] = 0x0;
        pkt->app_tag_mask[1] = 0x0;

        if (!qla2x00_hba_err_chk_enabled(sp))
            break;

        /* enable ALL bytes of the ref tag */
        pkt->ref_tag_mask[0] = 0xff;
        pkt->ref_tag_mask[1] = 0xff;
        pkt->ref_tag_mask[2] = 0xff;
        pkt->ref_tag_mask[3] = 0xff;
        break;
    }
}
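/*
 * Example (illustrative): for a Type 1 protected read starting at LBA
 * 0x12345678, the function above seeds pkt->ref_tag with the low 32 bits
 * of the LBA and, when HBA error checking is enabled, sets all four
 * ref_tag_mask bytes to 0xff so the firmware validates every byte of the
 * reference tag; the app tag is zeroed and its mask disabled.
 */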
struct qla2_sgx {
    dma_addr_t dma_addr;        /* OUT */
    uint32_t dma_len;           /* OUT */

    uint32_t tot_bytes;         /* IN */
    struct scatterlist *cur_sg; /* IN */

    /* for book keeping, bzero on initial invocation */
    uint32_t bytes_consumed;
    uint32_t num_bytes;
    uint32_t tot_partial;

    /* for debugging */
    uint32_t num_sg;
    srb_t *sp;
};
static int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
    uint32_t *partial)
{
    struct scatterlist *sg;
    uint32_t cumulative_partial, sg_len;
    dma_addr_t sg_dma_addr;

    if (sgx->num_bytes == sgx->tot_bytes)
        return 0;

    sg = sgx->cur_sg;
    cumulative_partial = sgx->tot_partial;

    sg_dma_addr = sg_dma_address(sg);
    sg_len = sg_dma_len(sg);

    sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

    if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
        sgx->dma_len = (blk_sz - cumulative_partial);
        sgx->tot_partial = 0;
        sgx->num_bytes += blk_sz;
        *partial = 0;
    } else {
        sgx->dma_len = sg_len - sgx->bytes_consumed;
        sgx->tot_partial += sgx->dma_len;
        *partial = 1;
    }

    sgx->bytes_consumed += sgx->dma_len;

    if (sg_len == sgx->bytes_consumed) {
        sg = sg_next(sg);
        sgx->num_sg++;
        sgx->cur_sg = sg;
        sgx->bytes_consumed = 0;
    }

    return 1;
}
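/*
 * Worked example (illustrative): with blk_sz = 512 and a scatterlist whose
 * first element is 700 bytes, the first call returns a 512-byte slice with
 * *partial = 0 (one full protection interval), and the second call returns
 * the remaining 188 bytes with *partial = 1; the 324 bytes that complete
 * that interval come from the next scatterlist element on a later call.
 */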
int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
    uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
    void *next_dsd;
    uint8_t avail_dsds = 0;
    uint32_t dsd_list_len;
    struct dsd_dma *dsd_ptr;
    struct scatterlist *sg_prot;
    uint32_t *cur_dsd = dsd;
    uint16_t used_dsds = tot_dsds;

    uint32_t prot_int; /* protection interval */
    uint32_t partial;
    struct qla2_sgx sgx;
    dma_addr_t sle_dma;
    uint32_t sle_dma_len, tot_prot_dma_len = 0;
    struct scsi_cmnd *cmd;
    struct scsi_qla_host *vha;

    memset(&sgx, 0, sizeof(struct qla2_sgx));
    if (sp) {
        vha = sp->fcport->vha;
        cmd = GET_CMD_SP(sp);
        prot_int = cmd->device->sector_size;

        sgx.tot_bytes = scsi_bufflen(cmd);
        sgx.cur_sg = scsi_sglist(cmd);
        sgx.sp = sp;

        sg_prot = scsi_prot_sglist(cmd);
    } else if (tc) {
        vha = tc->vha;
        prot_int = tc->blk_sz;
        sgx.tot_bytes = tc->bufflen;
        sgx.cur_sg = tc->sg;
        sg_prot = tc->prot_sg;
    } else {
        BUG();
        return 1;
    }

    while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

        sle_dma = sgx.dma_addr;
        sle_dma_len = sgx.dma_len;
alloc_and_fill:
        /* Allocate additional continuation packets? */
        if (avail_dsds == 0) {
            avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                QLA_DSDS_PER_IOCB : used_dsds;
            dsd_list_len = (avail_dsds + 1) * 12;
            used_dsds -= avail_dsds;

            /* allocate tracking DS */
            dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
            if (!dsd_ptr)
                return 1;

            /* allocate new list */
            dsd_ptr->dsd_addr = next_dsd =
                dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                    &dsd_ptr->dsd_list_dma);

            if (!next_dsd) {
                /*
                 * Need to cleanup only this dsd_ptr, rest
                 * will be done by sp_free_dma()
                 */
                kfree(dsd_ptr);
                return 1;
            }

            if (sp) {
                list_add_tail(&dsd_ptr->list,
                    &((struct crc_context *)
                        sp->u.scmd.ctx)->dsd_list);

                sp->flags |= SRB_CRC_CTX_DSD_VALID;
            } else {
                list_add_tail(&dsd_ptr->list,
                    &(tc->ctx->dsd_list));
                tc->ctx_dsd_alloced = 1;
            }

            /* add new list to cmd iocb or last list */
            *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
            *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
            *cur_dsd++ = dsd_list_len;
            cur_dsd = (uint32_t *)next_dsd;
        }
        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(sle_dma_len);
        avail_dsds--;

        if (partial == 0) {
            /* Got a full protection interval */
            sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
            sle_dma_len = 8;

            tot_prot_dma_len += sle_dma_len;
            if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
                tot_prot_dma_len = 0;
                sg_prot = sg_next(sg_prot);
            }

            partial = 1; /* So as to not re-enter this block */
            goto alloc_and_fill;
        }
    }
    /* Null termination */
    *cur_dsd++ = 0;
    *cur_dsd++ = 0;
    *cur_dsd++ = 0;
    return 0;
}
int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
    uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
    void *next_dsd;
    uint8_t avail_dsds = 0;
    uint32_t dsd_list_len;
    struct dsd_dma *dsd_ptr;
    struct scatterlist *sg, *sgl;
    uint32_t *cur_dsd = dsd;
    int i;
    uint16_t used_dsds = tot_dsds;
    struct scsi_cmnd *cmd;
    struct scsi_qla_host *vha;

    if (sp) {
        cmd = GET_CMD_SP(sp);
        sgl = scsi_sglist(cmd);
        vha = sp->fcport->vha;
    } else if (tc) {
        sgl = tc->sg;
        vha = tc->vha;
    } else {
        BUG();
        return 1;
    }

    for_each_sg(sgl, sg, tot_dsds, i) {
        dma_addr_t sle_dma;

        /* Allocate additional continuation packets? */
        if (avail_dsds == 0) {
            avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                QLA_DSDS_PER_IOCB : used_dsds;
            dsd_list_len = (avail_dsds + 1) * 12;
            used_dsds -= avail_dsds;

            /* allocate tracking DS */
            dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
            if (!dsd_ptr)
                return 1;

            /* allocate new list */
            dsd_ptr->dsd_addr = next_dsd =
                dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                    &dsd_ptr->dsd_list_dma);

            if (!next_dsd) {
                /*
                 * Need to cleanup only this dsd_ptr, rest
                 * will be done by sp_free_dma()
                 */
                kfree(dsd_ptr);
                return 1;
            }

            if (sp) {
                list_add_tail(&dsd_ptr->list,
                    &((struct crc_context *)
                        sp->u.scmd.ctx)->dsd_list);

                sp->flags |= SRB_CRC_CTX_DSD_VALID;
            } else {
                list_add_tail(&dsd_ptr->list,
                    &(tc->ctx->dsd_list));
                tc->ctx_dsd_alloced = 1;
            }

            /* add new list to cmd iocb or last list */
            *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
            *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
            *cur_dsd++ = dsd_list_len;
            cur_dsd = (uint32_t *)next_dsd;
        }
        sle_dma = sg_dma_address(sg);

        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
        avail_dsds--;
    }
    /* Null termination */
    *cur_dsd++ = 0;
    *cur_dsd++ = 0;
    *cur_dsd++ = 0;
    return 0;
}
int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
    uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
    void *next_dsd;
    uint8_t avail_dsds = 0;
    uint32_t dsd_list_len;
    struct dsd_dma *dsd_ptr;
    struct scatterlist *sg, *sgl;
    int i;
    struct scsi_cmnd *cmd;
    uint32_t *cur_dsd = dsd;
    uint16_t used_dsds = tot_dsds;
    struct scsi_qla_host *vha;

    if (sp) {
        cmd = GET_CMD_SP(sp);
        sgl = scsi_prot_sglist(cmd);
        vha = sp->fcport->vha;
    } else if (tc) {
        vha = tc->vha;
        sgl = tc->prot_sg;
    } else {
        BUG();
        return 1;
    }

    ql_dbg(ql_dbg_tgt, vha, 0xe021,
        "%s: enter\n", __func__);

    for_each_sg(sgl, sg, tot_dsds, i) {
        dma_addr_t sle_dma;

        /* Allocate additional continuation packets? */
        if (avail_dsds == 0) {
            avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                QLA_DSDS_PER_IOCB : used_dsds;
            dsd_list_len = (avail_dsds + 1) * 12;
            used_dsds -= avail_dsds;

            /* allocate tracking DS */
            dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
            if (!dsd_ptr)
                return 1;

            /* allocate new list */
            dsd_ptr->dsd_addr = next_dsd =
                dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                    &dsd_ptr->dsd_list_dma);

            if (!next_dsd) {
                /*
                 * Need to cleanup only this dsd_ptr, rest
                 * will be done by sp_free_dma()
                 */
                kfree(dsd_ptr);
                return 1;
            }

            if (sp) {
                list_add_tail(&dsd_ptr->list,
                    &((struct crc_context *)
                        sp->u.scmd.ctx)->dsd_list);

                sp->flags |= SRB_CRC_CTX_DSD_VALID;
            } else {
                list_add_tail(&dsd_ptr->list,
                    &(tc->ctx->dsd_list));
                tc->ctx_dsd_alloced = 1;
            }

            /* add new list to cmd iocb or last list */
            *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
            *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
            *cur_dsd++ = dsd_list_len;
            cur_dsd = (uint32_t *)next_dsd;
        }
        sle_dma = sg_dma_address(sg);

        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

        avail_dsds--;
    }
    /* Null termination */
    *cur_dsd++ = 0;
    *cur_dsd++ = 0;
    *cur_dsd++ = 0;
    return 0;
}
/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type 6 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
    uint32_t *cur_dsd, *fcp_dl;
    scsi_qla_host_t *vha;
    struct scsi_cmnd *cmd;
    uint32_t total_bytes = 0;
    uint32_t data_bytes;
    uint32_t dif_bytes;
    uint8_t bundling = 1;
    uint16_t blk_size;
    uint8_t *clr_ptr;
    struct crc_context *crc_ctx_pkt = NULL;
    struct qla_hw_data *ha;
    uint8_t additional_fcpcdb_len;
    uint16_t fcp_cmnd_len;
    struct fcp_cmnd *fcp_cmnd;
    dma_addr_t crc_ctx_dma;
    char tag[2];

    cmd = GET_CMD_SP(sp);

    /* Update entry type to indicate Command Type CRC_2 IOCB */
    *((uint32_t *)(&cmd_pkt->entry_type)) =
        __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);

    vha = sp->fcport->vha;
    ha = vha->hw;

    /* No data transfer */
    data_bytes = scsi_bufflen(cmd);
    if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
        cmd_pkt->byte_count = __constant_cpu_to_le32(0);
        return QLA_SUCCESS;
    }

    cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

    /* Set transfer direction */
    if (cmd->sc_data_direction == DMA_TO_DEVICE) {
        cmd_pkt->control_flags =
            __constant_cpu_to_le16(CF_WRITE_DATA);
    } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
        cmd_pkt->control_flags =
            __constant_cpu_to_le16(CF_READ_DATA);
    }

    if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
        (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
        (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
        (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
        bundling = 0;

    /* Allocate CRC context from global pool */
    crc_ctx_pkt = sp->u.scmd.ctx =
        dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

    if (!crc_ctx_pkt)
        goto crc_queuing_error;

    /* Zero out CTX area. */
    clr_ptr = (uint8_t *)crc_ctx_pkt;
    memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

    crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

    sp->flags |= SRB_CRC_CTX_DMA_VALID;

    /* Set handle */
    crc_ctx_pkt->handle = cmd_pkt->handle;

    INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

    qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
        &crc_ctx_pkt->ref_tag, tot_prot_dsds);

    cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
    cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
    cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

    /* Determine SCSI command length -- align to 4 byte boundary */
    if (cmd->cmd_len > 16) {
        additional_fcpcdb_len = cmd->cmd_len - 16;
        if ((cmd->cmd_len % 4) != 0) {
            /* SCSI cmd > 16 bytes must be multiple of 4 */
            goto crc_queuing_error;
        }
        fcp_cmnd_len = 12 + cmd->cmd_len + 4;
    } else {
        additional_fcpcdb_len = 0;
        fcp_cmnd_len = 12 + 16 + 4;
    }

    fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

    fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
    if (cmd->sc_data_direction == DMA_TO_DEVICE)
        fcp_cmnd->additional_cdb_len |= 1;
    else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
        fcp_cmnd->additional_cdb_len |= 2;

    int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
    memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
    cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
    cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
        LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
    cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
        MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
    fcp_cmnd->task_management = 0;

    /*
     * Update tagged queuing modifier if using command tag queuing
     */
    if (scsi_populate_tag_msg(cmd, tag)) {
        switch (tag[0]) {
        case HEAD_OF_QUEUE_TAG:
            fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
            break;
        case ORDERED_QUEUE_TAG:
            fcp_cmnd->task_attribute = TSK_ORDERED;
            break;
        default:
            fcp_cmnd->task_attribute = TSK_SIMPLE;
            break;
        }
    } else {
        fcp_cmnd->task_attribute = TSK_SIMPLE;
    }

    cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

    /* Compute dif len and adjust data len to include protection */
    dif_bytes = 0;
    blk_size = cmd->device->sector_size;
    dif_bytes = (data_bytes / blk_size) * 8;

    switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
    case SCSI_PROT_READ_INSERT:
    case SCSI_PROT_WRITE_STRIP:
        total_bytes = data_bytes;
        data_bytes += dif_bytes;
        break;

    case SCSI_PROT_READ_STRIP:
    case SCSI_PROT_WRITE_INSERT:
    case SCSI_PROT_READ_PASS:
    case SCSI_PROT_WRITE_PASS:
        total_bytes = data_bytes + dif_bytes;
        break;
    default:
        BUG();
    }

    if (!qla2x00_hba_err_chk_enabled(sp))
        fw_prot_opts |= 0x10; /* Disable Guard tag checking */
    /* HBA error checking enabled */
    else if (IS_PI_UNINIT_CAPABLE(ha)) {
        if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
            || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
            SCSI_PROT_DIF_TYPE2))
            fw_prot_opts |= BIT_10;
        else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
            SCSI_PROT_DIF_TYPE3)
            fw_prot_opts |= BIT_11;
    }

    if (!bundling) {
        cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
    } else {
        /*
         * Configure Bundling if we need to fetch interleaving
         * protection PCI accesses
         */
        fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
        crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
        crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
            tot_prot_dsds);
        cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
    }

    /* Finish the common fields of CRC pkt */
    crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
    crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
    crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
    crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
    /* Fibre channel byte count */
    cmd_pkt->byte_count = cpu_to_le32(total_bytes);
    fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
        additional_fcpcdb_len);
    *fcp_dl = htonl(total_bytes);

    if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
        cmd_pkt->byte_count = __constant_cpu_to_le32(0);
        return QLA_SUCCESS;
    }
    /* Walks data segments */

    cmd_pkt->control_flags |=
        __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

    if (!bundling && tot_prot_dsds) {
        if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
            cur_dsd, tot_dsds, NULL))
            goto crc_queuing_error;
    } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
        (tot_dsds - tot_prot_dsds), NULL))
        goto crc_queuing_error;

    if (bundling && tot_prot_dsds) {
        /* Walks dif segments */
        cmd_pkt->control_flags |=
            __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
        cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
        if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
            tot_prot_dsds, NULL))
            goto crc_queuing_error;
    }
    return QLA_SUCCESS;

crc_queuing_error:
    /* Cleanup will be performed by the caller */

    return QLA_FUNCTION_FAILED;
}
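/*
 * Worked example of the DIF length math above (illustrative): a 64 KiB
 * write to a 512-byte-sector device spans 128 blocks, so
 * dif_bytes = 128 * 8 = 1024. For a WRITE_PASS the wire total is
 * total_bytes = data_bytes + dif_bytes = 66560; for a WRITE_STRIP the
 * fabric sees only total_bytes = 65536 while data_bytes grows by dif_bytes
 * to cover the protection data fetched from host memory.
 */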
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
    int nseg;
    unsigned long flags;
    uint32_t *clr_ptr;
    uint32_t index;
    uint32_t handle;
    struct cmd_type_7 *cmd_pkt;
    uint16_t cnt;
    uint16_t req_cnt;
    uint16_t tot_dsds;
    struct req_que *req = NULL;
    struct rsp_que *rsp = NULL;
    struct scsi_cmnd *cmd = GET_CMD_SP(sp);
    struct scsi_qla_host *vha = sp->fcport->vha;
    struct qla_hw_data *ha = vha->hw;
    char tag[2];

    /* Setup device pointers. */
    qla25xx_set_que(sp, &rsp);
    req = vha->req;

    /* So we know we haven't pci_map'ed anything yet */
    tot_dsds = 0;

    /* Send marker if required */
    if (vha->marker_needed != 0) {
        if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
            QLA_SUCCESS)
            return QLA_FUNCTION_FAILED;
        vha->marker_needed = 0;
    }

    /* Acquire ring specific lock */
    spin_lock_irqsave(&ha->hardware_lock, flags);

    /* Check for room in outstanding command list. */
    handle = req->current_outstanding_cmd;
    for (index = 1; index < req->num_outstanding_cmds; index++) {
        handle++;
        if (handle == req->num_outstanding_cmds)
            handle = 1;
        if (!req->outstanding_cmds[handle])
            break;
    }
    if (index == req->num_outstanding_cmds)
        goto queuing_error;

    /* Map the sg table so we have an accurate count of sg entries needed */
    if (scsi_sg_count(cmd)) {
        nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
            scsi_sg_count(cmd), cmd->sc_data_direction);
        if (unlikely(!nseg))
            goto queuing_error;
    } else
        nseg = 0;

    tot_dsds = nseg;
    req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
    if (req->cnt < (req_cnt + 2)) {
        cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
            RD_REG_DWORD_RELAXED(req->req_q_out);
        if (req->ring_index < cnt)
            req->cnt = cnt - req->ring_index;
        else
            req->cnt = req->length -
                (req->ring_index - cnt);
        if (req->cnt < (req_cnt + 2))
            goto queuing_error;
    }

    /* Build command packet. */
    req->current_outstanding_cmd = handle;
    req->outstanding_cmds[handle] = sp;
    sp->handle = handle;
    cmd->host_scribble = (unsigned char *)(unsigned long)handle;
    req->cnt -= req_cnt;

    cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
    cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

    /* Zero out remaining portion of packet. */
    /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
    clr_ptr = (uint32_t *)cmd_pkt + 2;
    memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
    cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

    /* Set NPORT-ID and LUN number*/
    cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
    cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
    cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
    cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

    int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
    host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

    /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
    if (scsi_populate_tag_msg(cmd, tag)) {
        switch (tag[0]) {
        case HEAD_OF_QUEUE_TAG:
            cmd_pkt->task = TSK_HEAD_OF_QUEUE;
            break;
        case ORDERED_QUEUE_TAG:
            cmd_pkt->task = TSK_ORDERED;
            break;
        default:
            cmd_pkt->task = TSK_SIMPLE;
            break;
        }
    } else {
        cmd_pkt->task = TSK_SIMPLE;
    }

    /* Load SCSI command packet. */
    memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
    host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

    cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

    /* Build IOCB segments */
    qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

    /* Set total data segment count. */
    cmd_pkt->entry_count = (uint8_t)req_cnt;
    /* Specify response queue number where completion should happen */
    cmd_pkt->entry_status = (uint8_t) rsp->id;
    wmb();
    /* Adjust ring index. */
    req->ring_index++;
    if (req->ring_index == req->length) {
        req->ring_index = 0;
        req->ring_ptr = req->ring;
    } else
        req->ring_ptr++;

    sp->flags |= SRB_DMA_VALID;

    /* Set chip new ring index. */
    WRT_REG_DWORD(req->req_q_in, req->ring_index);
    RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

    /* Manage unprocessed RIO/ZIO commands in response queue. */
    if (vha->flags.process_response_queue &&
        rsp->ring_ptr->signature != RESPONSE_PROCESSED)
        qla24xx_process_response_queue(vha, rsp);

    spin_unlock_irqrestore(&ha->hardware_lock, flags);
    return QLA_SUCCESS;

queuing_error:
    if (tot_dsds)
        scsi_dma_unmap(cmd);

    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    return QLA_FUNCTION_FAILED;
}
/**
 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_dif_start_scsi(srb_t *sp)
{
    int nseg;
    unsigned long flags;
    uint32_t *clr_ptr;
    uint32_t index;
    uint32_t handle;
    uint16_t cnt;
    uint16_t req_cnt = 0;
    uint16_t tot_dsds;
    uint16_t tot_prot_dsds;
    uint16_t fw_prot_opts = 0;
    struct req_que *req = NULL;
    struct rsp_que *rsp = NULL;
    struct scsi_cmnd *cmd = GET_CMD_SP(sp);
    struct scsi_qla_host *vha = sp->fcport->vha;
    struct qla_hw_data *ha = vha->hw;
    struct cmd_type_crc_2 *cmd_pkt;
    uint32_t status = 0;

#define QDSS_GOT_Q_SPACE        BIT_0

    /* Only process protection or >16 cdb in this routine */
    if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
        if (cmd->cmd_len <= 16)
            return qla24xx_start_scsi(sp);
    }

    /* Setup device pointers. */
    qla25xx_set_que(sp, &rsp);
    req = vha->req;

    /* So we know we haven't pci_map'ed anything yet */
    tot_dsds = 0;

    /* Send marker if required */
    if (vha->marker_needed != 0) {
        if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
            QLA_SUCCESS)
            return QLA_FUNCTION_FAILED;
        vha->marker_needed = 0;
    }

    /* Acquire ring specific lock */
    spin_lock_irqsave(&ha->hardware_lock, flags);

    /* Check for room in outstanding command list. */
    handle = req->current_outstanding_cmd;
    for (index = 1; index < req->num_outstanding_cmds; index++) {
        handle++;
        if (handle == req->num_outstanding_cmds)
            handle = 1;
        if (!req->outstanding_cmds[handle])
            break;
    }

    if (index == req->num_outstanding_cmds)
        goto queuing_error;

    /* Compute number of required data segments */
    /* Map the sg table so we have an accurate count of sg entries needed */
    if (scsi_sg_count(cmd)) {
        nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
            scsi_sg_count(cmd), cmd->sc_data_direction);
        if (unlikely(!nseg))
            goto queuing_error;
        else
            sp->flags |= SRB_DMA_VALID;

        if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
            struct qla2_sgx sgx;
            uint32_t partial;

            memset(&sgx, 0, sizeof(struct qla2_sgx));
            sgx.tot_bytes = scsi_bufflen(cmd);
            sgx.cur_sg = scsi_sglist(cmd);
            sgx.sp = sp;

            nseg = 0;
            while (qla24xx_get_one_block_sg(
                cmd->device->sector_size, &sgx, &partial))
                nseg++;
        }
    } else
        nseg = 0;

    /* number of required data segments */
    tot_dsds = nseg;

    /* Compute number of required protection segments */
    if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
        nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
            scsi_prot_sg_count(cmd), cmd->sc_data_direction);
        if (unlikely(!nseg))
            goto queuing_error;
        else
            sp->flags |= SRB_CRC_PROT_DMA_VALID;

        if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
            nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
        }
    } else {
        nseg = 0;
    }

    req_cnt = 1;
    /* Total Data and protection sg segment(s) */
    tot_prot_dsds = nseg;
    tot_dsds += nseg;
    if (req->cnt < (req_cnt + 2)) {
        cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
            RD_REG_DWORD_RELAXED(req->req_q_out);
        if (req->ring_index < cnt)
            req->cnt = cnt - req->ring_index;
        else
            req->cnt = req->length -
                (req->ring_index - cnt);
        if (req->cnt < (req_cnt + 2))
            goto queuing_error;
    }

    status |= QDSS_GOT_Q_SPACE;

    /* Build header part of command packet (excluding the OPCODE). */
    req->current_outstanding_cmd = handle;
    req->outstanding_cmds[handle] = sp;
    sp->handle = handle;
    cmd->host_scribble = (unsigned char *)(unsigned long)handle;
    req->cnt -= req_cnt;

    /* Fill-in common area */
    cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
    cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

    clr_ptr = (uint32_t *)cmd_pkt + 2;
    memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

    /* Set NPORT-ID and LUN number*/
    cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
    cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
    cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

    int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
    host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

    /* Total Data and protection segment(s) */
    cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

    /* Build IOCB segments and adjust for data protection segments */
    if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
        req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
        QLA_SUCCESS)
        goto queuing_error;

    cmd_pkt->entry_count = (uint8_t)req_cnt;
    /* Specify response queue number where completion should happen */
    cmd_pkt->entry_status = (uint8_t) rsp->id;
    cmd_pkt->timeout = __constant_cpu_to_le16(0);
    wmb();

    /* Adjust ring index. */
    req->ring_index++;
    if (req->ring_index == req->length) {
        req->ring_index = 0;
        req->ring_ptr = req->ring;
    } else
        req->ring_ptr++;

    /* Set chip new ring index. */
    WRT_REG_DWORD(req->req_q_in, req->ring_index);
    RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

    /* Manage unprocessed RIO/ZIO commands in response queue. */
    if (vha->flags.process_response_queue &&
        rsp->ring_ptr->signature != RESPONSE_PROCESSED)
        qla24xx_process_response_queue(vha, rsp);

    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    return QLA_SUCCESS;

queuing_error:
    if (status & QDSS_GOT_Q_SPACE) {
        req->outstanding_cmds[handle] = NULL;
        req->cnt += req_cnt;
    }
    /* Cleanup will be performed by the caller (queuecommand) */

    spin_unlock_irqrestore(&ha->hardware_lock, flags);
    return QLA_FUNCTION_FAILED;
}
static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
{
    struct scsi_cmnd *cmd = GET_CMD_SP(sp);
    struct qla_hw_data *ha = sp->fcport->vha->hw;
    int affinity = cmd->request->cpu;

    if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
        affinity < ha->max_rsp_queues - 1)
        *rsp = ha->rsp_q_map[affinity + 1];
    else
        *rsp = ha->rsp_q_map[0];
}
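/*
 * Example (illustrative values assumed): with cpu_affinity_enabled and
 * max_rsp_queues = 5, a command issued from CPU 2 completes on
 * rsp_q_map[3] (queue index affinity + 1, since queue 0 is the base
 * queue); any affinity outside 0..3 falls back to rsp_q_map[0].
 */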
/* Generic Control-SRB manipulation functions. */
void *
qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
{
    struct qla_hw_data *ha = vha->hw;
    struct req_que *req = ha->req_q_map[0];
    device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
    uint32_t index, handle;
    request_t *pkt;
    uint16_t cnt, req_cnt;

    pkt = NULL;
    req_cnt = 1;
    handle = 0;

    if (!sp)
        goto skip_cmd_array;

    /* Check for room in outstanding command list. */
    handle = req->current_outstanding_cmd;
    for (index = 1; index < req->num_outstanding_cmds; index++) {
        handle++;
        if (handle == req->num_outstanding_cmds)
            handle = 1;
        if (!req->outstanding_cmds[handle])
            break;
    }
    if (index == req->num_outstanding_cmds) {
        ql_log(ql_log_warn, vha, 0x700b,
            "No room on outstanding cmd array.\n");
        goto queuing_error;
    }

    /* Prep command array. */
    req->current_outstanding_cmd = handle;
    req->outstanding_cmds[handle] = sp;
    sp->handle = handle;

    /* Adjust entry-counts as needed. */
    if (sp->type != SRB_SCSI_CMD)
        req_cnt = sp->iocbs;

skip_cmd_array:
    /* Check for room on request queue. */
    if (req->cnt < req_cnt) {
        if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
            cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
        else if (IS_P3P_TYPE(ha))
            cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
        else if (IS_FWI2_CAPABLE(ha))
            cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
        else if (IS_QLAFX00(ha))
            cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
        else
            cnt = qla2x00_debounce_register(
                ISP_REQ_Q_OUT(ha, &reg->isp));

        if (req->ring_index < cnt)
            req->cnt = cnt - req->ring_index;
        else
            req->cnt = req->length -
                (req->ring_index - cnt);
    }
    if (req->cnt < req_cnt)
        goto queuing_error;

    /* Prep packet */
    req->cnt -= req_cnt;
    pkt = req->ring_ptr;
    memset(pkt, 0, REQUEST_ENTRY_SIZE);
    if (IS_QLAFX00(ha)) {
        WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
        WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
    } else {
        pkt->entry_count = req_cnt;
        pkt->handle = handle;
    }

queuing_error:
    return pkt;
}
static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
    struct srb_iocb *lio = &sp->u.iocb_cmd;

    logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
    logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
    if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
        logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
    if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
        logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
    logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    logio->port_id[0] = sp->fcport->d_id.b.al_pa;
    logio->port_id[1] = sp->fcport->d_id.b.area;
    logio->port_id[2] = sp->fcport->d_id.b.domain;
    logio->vp_index = sp->fcport->vha->vp_idx;
}
static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
    struct qla_hw_data *ha = sp->fcport->vha->hw;
    struct srb_iocb *lio = &sp->u.iocb_cmd;
    uint16_t opts;

    mbx->entry_type = MBX_IOCB_TYPE;
    SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
    mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
    opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
    opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
    if (HAS_EXTENDED_IDS(ha)) {
        mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
        mbx->mb10 = cpu_to_le16(opts);
    } else {
        mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
    }
    mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
    mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
        sp->fcport->d_id.b.al_pa);
    mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
}
static void
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
    logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
    logio->control_flags =
        cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
    logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    logio->port_id[0] = sp->fcport->d_id.b.al_pa;
    logio->port_id[1] = sp->fcport->d_id.b.area;
    logio->port_id[2] = sp->fcport->d_id.b.domain;
    logio->vp_index = sp->fcport->vha->vp_idx;
}
static void
qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
{
    struct qla_hw_data *ha = sp->fcport->vha->hw;

    mbx->entry_type = MBX_IOCB_TYPE;
    SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
    mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
    mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
        cpu_to_le16(sp->fcport->loop_id) :
        cpu_to_le16(sp->fcport->loop_id << 8);
    mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
    mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
        sp->fcport->d_id.b.al_pa);
    mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
    /* Implicit: mbx->mbx10 = 0. */
}
static void
qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
    logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
    logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
    logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    logio->vp_index = sp->fcport->vha->vp_idx;
}
static void
qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
{
    struct qla_hw_data *ha = sp->fcport->vha->hw;

    mbx->entry_type = MBX_IOCB_TYPE;
    SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
    mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
    if (HAS_EXTENDED_IDS(ha)) {
        mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
        mbx->mb10 = cpu_to_le16(BIT_0);
    } else {
        mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
    }
    mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
    mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
    mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
    mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
    mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
}
static void
qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
{
    uint32_t flags;
    uint16_t lun;
    struct fc_port *fcport = sp->fcport;
    scsi_qla_host_t *vha = fcport->vha;
    struct qla_hw_data *ha = vha->hw;
    struct srb_iocb *iocb = &sp->u.iocb_cmd;
    struct req_que *req = vha->req;

    flags = iocb->u.tmf.flags;
    lun = iocb->u.tmf.lun;

    tsk->entry_type = TSK_MGMT_IOCB_TYPE;
    tsk->entry_count = 1;
    tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
    tsk->nport_handle = cpu_to_le16(fcport->loop_id);
    tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
    tsk->control_flags = cpu_to_le32(flags);
    tsk->port_id[0] = fcport->d_id.b.al_pa;
    tsk->port_id[1] = fcport->d_id.b.area;
    tsk->port_id[2] = fcport->d_id.b.domain;
    tsk->vp_index = fcport->vha->vp_idx;

    if (flags == TCF_LUN_RESET) {
        int_to_scsilun(lun, &tsk->lun);
        host_to_fcp_swap((uint8_t *)&tsk->lun,
            sizeof(tsk->lun));
    }
}
*sp
, struct els_entry_24xx
*els_iocb
)
2081 struct fc_bsg_job
*bsg_job
= sp
->u
.bsg_job
;
2083 els_iocb
->entry_type
= ELS_IOCB_TYPE
;
2084 els_iocb
->entry_count
= 1;
2085 els_iocb
->sys_define
= 0;
2086 els_iocb
->entry_status
= 0;
2087 els_iocb
->handle
= sp
->handle
;
2088 els_iocb
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
2089 els_iocb
->tx_dsd_count
= __constant_cpu_to_le16(bsg_job
->request_payload
.sg_cnt
);
2090 els_iocb
->vp_index
= sp
->fcport
->vha
->vp_idx
;
2091 els_iocb
->sof_type
= EST_SOFI3
;
2092 els_iocb
->rx_dsd_count
= __constant_cpu_to_le16(bsg_job
->reply_payload
.sg_cnt
);
2095 sp
->type
== SRB_ELS_CMD_RPT
?
2096 bsg_job
->request
->rqst_data
.r_els
.els_code
:
2097 bsg_job
->request
->rqst_data
.h_els
.command_code
;
2098 els_iocb
->port_id
[0] = sp
->fcport
->d_id
.b
.al_pa
;
2099 els_iocb
->port_id
[1] = sp
->fcport
->d_id
.b
.area
;
2100 els_iocb
->port_id
[2] = sp
->fcport
->d_id
.b
.domain
;
2101 els_iocb
->control_flags
= 0;
2102 els_iocb
->rx_byte_count
=
2103 cpu_to_le32(bsg_job
->reply_payload
.payload_len
);
2104 els_iocb
->tx_byte_count
=
2105 cpu_to_le32(bsg_job
->request_payload
.payload_len
);
2107 els_iocb
->tx_address
[0] = cpu_to_le32(LSD(sg_dma_address
2108 (bsg_job
->request_payload
.sg_list
)));
2109 els_iocb
->tx_address
[1] = cpu_to_le32(MSD(sg_dma_address
2110 (bsg_job
->request_payload
.sg_list
)));
2111 els_iocb
->tx_len
= cpu_to_le32(sg_dma_len
2112 (bsg_job
->request_payload
.sg_list
));
2114 els_iocb
->rx_address
[0] = cpu_to_le32(LSD(sg_dma_address
2115 (bsg_job
->reply_payload
.sg_list
)));
2116 els_iocb
->rx_address
[1] = cpu_to_le32(MSD(sg_dma_address
2117 (bsg_job
->reply_payload
.sg_list
)));
2118 els_iocb
->rx_len
= cpu_to_le32(sg_dma_len
2119 (bsg_job
->reply_payload
.sg_list
));
2121 sp
->fcport
->vha
->qla_stats
.control_requests
++;
static void
qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
{
    uint16_t avail_dsds;
    uint32_t *cur_dsd;
    struct scatterlist *sg;
    int index;
    uint16_t tot_dsds;
    scsi_qla_host_t *vha = sp->fcport->vha;
    struct qla_hw_data *ha = vha->hw;
    struct fc_bsg_job *bsg_job = sp->u.bsg_job;
    int loop_iterartion = 0;
    int cont_iocb_prsnt = 0;
    int entry_count = 1;

    memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
    ct_iocb->entry_type = CT_IOCB_TYPE;
    ct_iocb->entry_status = 0;
    ct_iocb->handle1 = sp->handle;
    SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
    ct_iocb->status = __constant_cpu_to_le16(0);
    ct_iocb->control_flags = __constant_cpu_to_le16(0);
    ct_iocb->timeout = 0;
    ct_iocb->cmd_dsd_count =
        __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
    ct_iocb->total_dsd_count =
        __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
    ct_iocb->req_bytecount =
        cpu_to_le32(bsg_job->request_payload.payload_len);
    ct_iocb->rsp_bytecount =
        cpu_to_le32(bsg_job->reply_payload.payload_len);

    ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
        (bsg_job->request_payload.sg_list)));
    ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
        (bsg_job->request_payload.sg_list)));
    ct_iocb->dseg_req_length = ct_iocb->req_bytecount;

    ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
        (bsg_job->reply_payload.sg_list)));
    ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
        (bsg_job->reply_payload.sg_list)));
    ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;

    avail_dsds = 1;
    cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
    index = 0;
    tot_dsds = bsg_job->reply_payload.sg_cnt;

    for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
        dma_addr_t sle_dma;
        cont_a64_entry_t *cont_pkt;

        /* Allocate additional continuation packets? */
        if (avail_dsds == 0) {
            /*
             * Five DSDs are available in the Cont.
             * Type 1 IOCB.
             */
            cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
                vha->hw->req_q_map[0]);
            cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
            avail_dsds = 5;
            cont_iocb_prsnt = 1;
            entry_count++;
        }

        sle_dma = sg_dma_address(sg);
        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
        loop_iterartion++;
        avail_dsds--;
    }
    ct_iocb->entry_count = entry_count;

    sp->fcport->vha->qla_stats.control_requests++;
}
static void
qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	struct scatterlist *sg;
	int index;
	uint16_t tot_dsds;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
	int loop_iteration = 0;
	int cont_iocb_prsnt = 0;
	int entry_count = 1;

	ct_iocb->entry_type = CT_IOCB_TYPE;
	ct_iocb->entry_status = 0;
	ct_iocb->sys_define = 0;
	ct_iocb->handle = sp->handle;

	ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	ct_iocb->vp_index = sp->fcport->vha->vp_idx;
	ct_iocb->comp_status = __constant_cpu_to_le16(0);

	ct_iocb->cmd_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	ct_iocb->timeout = 0;
	ct_iocb->rsp_dsd_count =
	    __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	ct_iocb->rsp_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	ct_iocb->cmd_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);
	ct_iocb->dseg_0_address[0] =
	    cpu_to_le32(LSD(sg_dma_address(bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_0_address[1] =
	    cpu_to_le32(MSD(sg_dma_address(bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_0_len =
	    cpu_to_le32(sg_dma_len(bsg_job->request_payload.sg_list));

	avail_dsds = 1;
	cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
	index = 0;
	tot_dsds = bsg_job->reply_payload.sg_cnt;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
			    ha->req_q_map[0]);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
			cont_iocb_prsnt = 1;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		loop_iteration++;
		avail_dsds--;
	}
	ct_iocb->entry_count = entry_count;
}
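
/*
 * A minimal sketch (not part of the driver) of the same bookkeeping,
 * assuming one inline DSD in the CT IOCB and five DSDs per Continuation
 * Type 1 IOCB:
 *
 *	static inline uint8_t ct_iocb_entries(uint16_t dsds)
 *	{
 *		return dsds <= 1 ? 1 : 1 + (dsds - 1 + 4) / 5;
 *	}
 */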
/**
 * qla82xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
qla82xx_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_82xx __iomem *reg;
	uint32_t dbval;
	uint32_t *fcp_dl;
	uint8_t additional_cdb_len;
	struct ct6_dsd *ctx;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	char tag[2];

	/* Setup device pointers. */
	reg = &ha->iobase->isp82;
	cmd = GET_CMD_SP(sp);
	req = vha->req;
	rsp = ha->rsp_q_map[0];

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	dbval = 0x04 | (ha->portnum << 5);

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req,
		    rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x300c,
			    "qla2x00_marker failed for cmd=%p.\n", cmd);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;
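
	/*
	 * The scan above walks the outstanding-command table circularly,
	 * starting just past the last handle issued; handle 0 is never
	 * handed out, so a wrap restarts at 1.  If every slot is busy the
	 * command is bounced with a queuing error.
	 */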
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	if (tot_dsds > ql2xshiftctondsd) {
		struct cmd_type_6 *cmd_pkt;
		uint16_t more_dsd_lists = 0;
		struct dsd_dma *dsd_ptr;
		uint16_t i;

		more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
		if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
			ql_dbg(ql_dbg_io, vha, 0x300d,
			    "Num of DSD list %d is more than %d for cmd=%p.\n",
			    more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
			    cmd);
			goto queuing_error;
		}

		if (more_dsd_lists <= ha->gbl_dsd_avail)
			goto sufficient_dsds;
		else
			more_dsd_lists -= ha->gbl_dsd_avail;

		for (i = 0; i < more_dsd_lists; i++) {
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr) {
				ql_log(ql_log_fatal, vha, 0x300e,
				    "Failed to allocate memory for dsd_dma "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}

			dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
			    GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
			if (!dsd_ptr->dsd_addr) {
				kfree(dsd_ptr);
				ql_log(ql_log_fatal, vha, 0x300f,
				    "Failed to allocate memory for dsd_addr "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}
			list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
			ha->gbl_dsd_avail++;
		}

sufficient_dsds:
		req_cnt = 1;

		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
			if (req->cnt < (req_cnt + 2))
				goto queuing_error;
		}

		ctx = sp->u.scmd.ctx =
		    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
		if (!ctx) {
			ql_log(ql_log_fatal, vha, 0x3010,
			    "Failed to allocate ctx for cmd=%p.\n", cmd);
			goto queuing_error;
		}

		memset(ctx, 0, sizeof(struct ct6_dsd));
		ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
		    GFP_ATOMIC, &ctx->fcp_cmnd_dma);
		if (!ctx->fcp_cmnd) {
			ql_log(ql_log_fatal, vha, 0x3011,
			    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
			goto queuing_error;
		}

		/* Initialize the DSD list and dma handle */
		INIT_LIST_HEAD(&ctx->dsd_list);
		ctx->dsd_use_cnt = 0;

		if (cmd->cmd_len > 16) {
			additional_cdb_len = cmd->cmd_len - 16;
			if ((cmd->cmd_len % 4) != 0) {
				/* SCSI command bigger than 16 bytes must be
				 * a multiple of 4.
				 */
				ql_log(ql_log_warn, vha, 0x3012,
				    "scsi cmd len %d not multiple of 4 "
				    "for cmd=%p.\n", cmd->cmd_len, cmd);
				goto queuing_error_fcp_cmnd;
			}
			ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
		} else {
			additional_cdb_len = 0;
			ctx->fcp_cmnd_len = 12 + 16 + 4;
		}

		cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

		/* Zero out remaining portion of packet. */
		/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

		/* Set NPORT-ID and LUN number */
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

		/* Build IOCB segments */
		if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
			goto queuing_error_fcp_cmnd;

		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

		/* Build FCP_CMND IU */
		memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
		int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;

		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 1;
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 2;

		/*
		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
		 */
		if (scsi_populate_tag_msg(cmd, tag)) {
			switch (tag[0]) {
			case HEAD_OF_QUEUE_TAG:
				ctx->fcp_cmnd->task_attribute =
				    TSK_HEAD_OF_QUEUE;
				break;
			case ORDERED_QUEUE_TAG:
				ctx->fcp_cmnd->task_attribute =
				    TSK_ORDERED;
				break;
			}
		}

		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			ctx->fcp_cmnd->task_attribute |=
			    sp->fcport->fcp_prio << 3;

		memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);

		fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
		    additional_cdb_len);
		*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));

		cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
		cmd_pkt->fcp_cmnd_dseg_address[0] =
		    cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
		cmd_pkt->fcp_cmnd_dseg_address[1] =
		    cpu_to_le32(MSD(ctx->fcp_cmnd_dma));

		sp->flags |= SRB_FCP_CMND_DMA_VALID;
		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
		/* Set total data segment count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/* Specify response queue number where
		 * completion should happen.
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;
	} else {
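		/*
		 * Standard path: the DSD count fits in a Command Type 7
		 * IOCB (plus inline continuations), so no external DSD
		 * lists or FCP_CMND DMA buffer are needed.
		 */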
		struct cmd_type_7 *cmd_pkt;

		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
		}
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;

		cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

		/* Zero out remaining portion of packet. */
		/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

		/* Set NPORT-ID and LUN number */
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
		    sizeof(cmd_pkt->lun));

		/*
		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
		 */
		if (scsi_populate_tag_msg(cmd, tag)) {
			switch (tag[0]) {
			case HEAD_OF_QUEUE_TAG:
				cmd_pkt->task = TSK_HEAD_OF_QUEUE;
				break;
			case ORDERED_QUEUE_TAG:
				cmd_pkt->task = TSK_ORDERED;
				break;
			}
		}

		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			cmd_pkt->task |= sp->fcport->fcp_prio << 3;

		/* Load SCSI command packet. */
		memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
		host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

		/* Build IOCB segments */
		qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

		/* Set total data segment count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/* Specify response queue number where
		 * completion should happen.
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;
	wmb();
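
	/*
	 * Free-slot arithmetic above (worked example): with a ring of
	 * req->length = 2048 entries, ring_index = 2000 and a hardware
	 * out-pointer cnt = 100, the free count is 2048 - (2000 - 100) =
	 * 148 entries; queuing proceeds only if req_cnt + 2 still fit.
	 */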
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	/* Write, read back and verify. */
	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
	if (ql2xdbwr)
		qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
	else {
		WRT_REG_DWORD(
		    (unsigned long __iomem *)ha->nxdb_wr_ptr,
		    dbval);
		wmb();
		while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
			WRT_REG_DWORD(
			    (unsigned long __iomem *)ha->nxdb_wr_ptr,
			    dbval);
			wmb();
		}
	}

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error_fcp_cmnd:
	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	if (sp->u.scmd.ctx) {
		mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
		sp->u.scmd.ctx = NULL;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
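
/*
 * qla82xx_start_scsi() is normally reached through the ha->isp_ops
 * ->start_scsi() method from the midlayer queuecommand path.  The
 * write/read-back doorbell loop above appears to work around posted-write
 * latency on the doorbell register (behaviour inferred from the retry
 * loop, not from separate documentation).
 */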
static void
qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
{
	struct srb_iocb *aio = &sp->u.iocb_cmd;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct req_que *req = vha->req;

	memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
	abt_iocb->entry_type = ABORT_IOCB_TYPE;
	abt_iocb->entry_count = 1;
	abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
	abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	abt_iocb->handle_to_abort =
	    cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
	abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
	abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
	abt_iocb->vp_index = vha->vp_idx;
	abt_iocb->req_que_no = cpu_to_le16(req->id);
	/* Send the command to the firmware */
	wmb();
}
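
/*
 * Usage note: the handle placed in handle_to_abort is the ring handle of
 * the command being aborted, stashed in sp->u.iocb_cmd.u.abt.cmd_hndl by
 * the abort issuer before this IOCB is queued via qla2x00_start_sp().
 */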
int
qla2x00_start_sp(srb_t *sp)
{
	int rval;
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	void *pkt;
	unsigned long flags;

	rval = QLA_FUNCTION_FAILED;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
	if (!pkt) {
		ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
		    "qla2x00_alloc_iocbs failed.\n");
		goto done;
	}

	rval = QLA_SUCCESS;
	switch (sp->type) {
	case SRB_LOGIN_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_login_iocb(sp, pkt) :
		    qla2x00_login_iocb(sp, pkt);
		break;
	case SRB_LOGOUT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_logout_iocb(sp, pkt) :
		    qla2x00_logout_iocb(sp, pkt);
		break;
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		qla24xx_els_iocb(sp, pkt);
		break;
	case SRB_CT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_ct_iocb(sp, pkt) :
		    qla2x00_ct_iocb(sp, pkt);
		break;
	case SRB_ADISC_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_adisc_iocb(sp, pkt) :
		    qla2x00_adisc_iocb(sp, pkt);
		break;
	case SRB_TM_CMD:
		IS_QLAFX00(ha) ?
		    qlafx00_tm_iocb(sp, pkt) :
		    qla24xx_tm_iocb(sp, pkt);
		break;
	case SRB_FXIOCB_DCMD:
	case SRB_FXIOCB_BCMD:
		qlafx00_fxdisc_iocb(sp, pkt);
		break;
	case SRB_ABT_CMD:
		IS_QLAFX00(ha) ?
		    qlafx00_abort_iocb(sp, pkt) :
		    qla24xx_abort_iocb(sp, pkt);
		break;
	default:
		break;
	}

	wmb();
	qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}
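
/*
 * Typical caller pattern (sketch; the exact setup and cleanup helpers
 * depend on the SRB type being issued):
 *
 *	sp->type = SRB_LOGIN_CMD;
 *	... fill in sp->u.iocb_cmd for the request ...
 *	rval = qla2x00_start_sp(sp);
 *	if (rval != QLA_SUCCESS)
 *		... release the srb and report the failure ...
 */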
static void
qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
    struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	uint32_t req_data_len = 0;
	uint32_t rsp_data_len = 0;
	struct scatterlist *sg;
	int index;
	int entry_count = 1;
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;

	/* Update entry type to indicate bidir command */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_BIDIRECTIONAL);

	/* Set the transfer direction; in this case set both flags.
	 * Also set the BD_WRAP_BACK flag; the firmware will take care of
	 * assigning DID=SID for outgoing pkts.
	 */
	cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
	cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	cmd_pkt->control_flags =
	    __constant_cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
	    BD_WRAP_BACK);

	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
	cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
	cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);

	vha->bidi_stats.transfer_bytes += req_data_len;
	vha->bidi_stats.io_count++;

	vha->qla_stats.output_bytes += req_data_len;
	vha->qla_stats.output_requests++;

	/* Only one DSD is available for the bidirectional IOCB; the
	 * remaining DSDs are bundled in continuation IOCBs.
	 */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;

	index = 0;

	for_each_sg(bsg_job->request_payload.sg_list, sg,
	    bsg_job->request_payload.sg_cnt, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/* Continuation Type 1 IOCB can accommodate
			 * 5 DSDs.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}
		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* Read-request DSDs always go to a continuation IOCB and follow
	 * the write DSDs.  If there is room on the current IOCB they are
	 * added there, else a new continuation IOCB is allocated.
	 */
	for_each_sg(bsg_job->reply_payload.sg_list, sg,
	    bsg_job->reply_payload.sg_cnt, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/* Continuation Type 1 IOCB can accommodate
			 * 5 DSDs.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}
		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* This value should be the same as the number of IOCBs required
	 * for this cmd.
	 */
	cmd_pkt->entry_count = entry_count;
}
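
/*
 * DSD layout example (illustrative): with 3 write and 4 read sg entries,
 * the first write DSD lands in the bidir IOCB itself and the remaining
 * 2 + 4 = 6 DSDs span two Continuation Type 1 IOCBs (5 + 1), so
 * entry_count ends up as 3.
 */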
int
qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	uint32_t handle;
	uint32_t index;
	uint16_t req_cnt;
	uint16_t cnt;
	uint32_t *clr_ptr;
	struct cmd_bidir *cmd_pkt = NULL;
	struct rsp_que *rsp;
	struct req_que *req;
	int rval = EXT_STATUS_OK;

	rsp = ha->rsp_q_map[0];
	req = vha->req;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req,
		    rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
			return EXT_STATUS_MAILBOX;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}

	/* Calculate number of IOCBs required */
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

	/* Check for room on request queue. */
	if (req->cnt < req_cnt + 2) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < req_cnt + 2) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}

	cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID (of vha) */
	cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
	cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
	cmd_pkt->port_id[1] = vha->d_id.b.area;
	cmd_pkt->port_id[2] = vha->d_id.b.domain;

	qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	/* Send the command to the firmware */
	wmb();
	qla2x00_start_iocbs(vha, req);
queuing_error:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}
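
/*
 * Usage note (sketch): this entry point backs the bidirectional loopback
 * diagnostic issued through the FC BSG interface; the caller maps both the
 * request and reply scatterlists before computing tot_dsds and invoking
 * qla2x00_start_bidir(), and unmaps them on completion (assumed caller
 * behaviour, based on the bsg_job payloads consumed above).
 */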