[SCSI] qla2xxx: Implement FCP priority tagging for 82xx adapters.
drivers/scsi/qla2xxx/qla_iocb.c
1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
8
9 #include <linux/blkdev.h>
10 #include <linux/delay.h>
11
12 #include <scsi/scsi_tcq.h>
13
14 static void qla25xx_set_que(srb_t *, struct rsp_que **);
15 /**
16 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
17 * @sp: SRB for the SCSI command
18 *
19 * Returns the proper CF_* direction based on the command's data direction.
20 */
21 static inline uint16_t
22 qla2x00_get_cmd_direction(srb_t *sp)
23 {
24 uint16_t cflags;
25
26 cflags = 0;
27
28 /* Set transfer direction */
29 if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
30 cflags = CF_WRITE;
31 sp->fcport->vha->hw->qla_stats.output_bytes +=
32 scsi_bufflen(sp->cmd);
33 } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
34 cflags = CF_READ;
35 sp->fcport->vha->hw->qla_stats.input_bytes +=
36 scsi_bufflen(sp->cmd);
37 }
38 return (cflags);
39 }
40
41 /**
42 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
43 * Continuation Type 0 IOCBs to allocate.
44 *
45 * @dsds: number of data segment descriptors needed
46 *
47 * Returns the number of IOCB entries needed to store @dsds.
48 */
49 uint16_t
50 qla2x00_calc_iocbs_32(uint16_t dsds)
51 {
52 uint16_t iocbs;
53
54 iocbs = 1;
55 if (dsds > 3) {
56 iocbs += (dsds - 3) / 7;
57 if ((dsds - 3) % 7)
58 iocbs++;
59 }
60 return (iocbs);
61 }
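
/*
 * Worked example (illustrative only): with 3 DSDs in the command IOCB
 * and 7 per Continuation Type 0 IOCB, dsds = 12 needs
 *
 *	1 + (12 - 3) / 7 + 1 = 3
 *
 * request entries: 3 DSDs in the command IOCB, 7 in the first
 * continuation, and the remaining 2 in a second continuation.  The
 * same result can be written as 1 + DIV_ROUND_UP(dsds - 3, 7).
 */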
62
63 /**
64 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
65 * Continuation Type 1 IOCBs to allocate.
66 *
67 * @dsds: number of data segment descriptors needed
68 *
69 * Returns the number of IOCB entries needed to store @dsds.
70 */
71 uint16_t
72 qla2x00_calc_iocbs_64(uint16_t dsds)
73 {
74 uint16_t iocbs;
75
76 iocbs = 1;
77 if (dsds > 2) {
78 iocbs += (dsds - 2) / 5;
79 if ((dsds - 2) % 5)
80 iocbs++;
81 }
82 return (iocbs);
83 }
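
/*
 * For example, dsds = 12 with the 64-bit layout (2 DSDs in the command
 * IOCB, 5 per Continuation Type 1 IOCB) gives 1 + (12 - 2) / 5 = 3
 * entries exactly, with no partial continuation needed.
 */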
84
85 /**
86 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
87 * @vha: HA context
88 *
89 * Returns a pointer to the Continuation Type 0 IOCB packet.
90 */
91 static inline cont_entry_t *
92 qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
93 {
94 cont_entry_t *cont_pkt;
95 struct req_que *req = vha->req;
96 /* Adjust ring index. */
97 req->ring_index++;
98 if (req->ring_index == req->length) {
99 req->ring_index = 0;
100 req->ring_ptr = req->ring;
101 } else {
102 req->ring_ptr++;
103 }
104
105 cont_pkt = (cont_entry_t *)req->ring_ptr;
106
107 /* Load packet defaults. */
108 *((uint32_t *)(&cont_pkt->entry_type)) =
109 __constant_cpu_to_le32(CONTINUE_TYPE);
110
111 return (cont_pkt);
112 }
113
114 /**
115 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
116 * @vha: HA context
117 * @req: request queue
118 * Returns a pointer to the continuation type 1 IOCB packet.
119 */
120 static inline cont_a64_entry_t *
121 qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
122 {
123 cont_a64_entry_t *cont_pkt;
124
125 /* Adjust ring index. */
126 req->ring_index++;
127 if (req->ring_index == req->length) {
128 req->ring_index = 0;
129 req->ring_ptr = req->ring;
130 } else {
131 req->ring_ptr++;
132 }
133
134 cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
135
136 /* Load packet defaults. */
137 *((uint32_t *)(&cont_pkt->entry_type)) =
138 __constant_cpu_to_le32(CONTINUE_A64_TYPE);
139
140 return (cont_pkt);
141 }
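
/*
 * Both prep helpers advance the request ring with the same wrap rule:
 * when the index reaches the ring length it returns to zero and the
 * ring pointer snaps back to the ring base.  Condensed (illustrative
 * only):
 *
 *	if (++req->ring_index == req->length) {
 *		req->ring_index = 0;
 *		req->ring_ptr = req->ring;
 *	} else {
 *		req->ring_ptr++;
 *	}
 */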
142
143 static inline int
144 qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
145 {
146 uint8_t guard = scsi_host_get_guard(sp->cmd->device->host);
147
148 /* We only support T10 DIF right now */
149 if (guard != SHOST_DIX_GUARD_CRC) {
150 ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
151 "Unsupported guard: %d for cmd=%p.\n", guard, sp->cmd);
152 return 0;
153 }
154
155 /* We always use DIF Bundling for best performance */
156 *fw_prot_opts = 0;
157
158 /* Translate SCSI opcode to a protection opcode */
159 switch (scsi_get_prot_op(sp->cmd)) {
160 case SCSI_PROT_READ_STRIP:
161 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
162 break;
163 case SCSI_PROT_WRITE_INSERT:
164 *fw_prot_opts |= PO_MODE_DIF_INSERT;
165 break;
166 case SCSI_PROT_READ_INSERT:
167 *fw_prot_opts |= PO_MODE_DIF_INSERT;
168 break;
169 case SCSI_PROT_WRITE_STRIP:
170 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
171 break;
172 case SCSI_PROT_READ_PASS:
173 *fw_prot_opts |= PO_MODE_DIF_PASS;
174 break;
175 case SCSI_PROT_WRITE_PASS:
176 *fw_prot_opts |= PO_MODE_DIF_PASS;
177 break;
178 default: /* Normal Request */
179 *fw_prot_opts |= PO_MODE_DIF_PASS;
180 break;
181 }
182
183 return scsi_prot_sg_count(sp->cmd);
184 }
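
/*
 * Mapping summary: the *_STRIP operations ask the firmware to remove
 * protection data (PO_MODE_DIF_REMOVE), the *_INSERT operations ask it
 * to generate protection data (PO_MODE_DIF_INSERT), and the *_PASS
 * operations carry protection data through while checking it
 * (PO_MODE_DIF_PASS).
 */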
185
186 /**
187 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
188 * capable IOCB types.
189 *
190 * @sp: SRB command to process
191 * @cmd_pkt: Command type 2 IOCB
192 * @tot_dsds: Total number of segments to transfer
193 */
194 void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
195 uint16_t tot_dsds)
196 {
197 uint16_t avail_dsds;
198 uint32_t *cur_dsd;
199 scsi_qla_host_t *vha;
200 struct scsi_cmnd *cmd;
201 struct scatterlist *sg;
202 int i;
203
204 cmd = sp->cmd;
205
206 /* Update entry type to indicate Command Type 2 IOCB */
207 *((uint32_t *)(&cmd_pkt->entry_type)) =
208 __constant_cpu_to_le32(COMMAND_TYPE);
209
210 /* No data transfer */
211 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
212 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
213 return;
214 }
215
216 vha = sp->fcport->vha;
217 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
218
219 /* Three DSDs are available in the Command Type 2 IOCB */
220 avail_dsds = 3;
221 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
222
223 /* Load data segments */
224 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
225 cont_entry_t *cont_pkt;
226
227 /* Allocate additional continuation packets? */
228 if (avail_dsds == 0) {
229 /*
230 * Seven DSDs are available in the Continuation
231 * Type 0 IOCB.
232 */
233 cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
234 cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
235 avail_dsds = 7;
236 }
237
238 *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
239 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
240 avail_dsds--;
241 }
242 }
243
244 /**
245 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
246 * capable IOCB types.
247 *
248 * @sp: SRB command to process
249 * @cmd_pkt: Command type 3 IOCB
250 * @tot_dsds: Total number of segments to transfer
251 */
252 void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
253 uint16_t tot_dsds)
254 {
255 uint16_t avail_dsds;
256 uint32_t *cur_dsd;
257 scsi_qla_host_t *vha;
258 struct scsi_cmnd *cmd;
259 struct scatterlist *sg;
260 int i;
261
262 cmd = sp->cmd;
263
264 /* Update entry type to indicate Command Type 3 IOCB */
265 *((uint32_t *)(&cmd_pkt->entry_type)) =
266 __constant_cpu_to_le32(COMMAND_A64_TYPE);
267
268 /* No data transfer */
269 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
270 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
271 return;
272 }
273
274 vha = sp->fcport->vha;
275 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
276
277 /* Two DSDs are available in the Command Type 3 IOCB */
278 avail_dsds = 2;
279 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
280
281 /* Load data segments */
282 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
283 dma_addr_t sle_dma;
284 cont_a64_entry_t *cont_pkt;
285
286 /* Allocate additional continuation packets? */
287 if (avail_dsds == 0) {
288 /*
289 * Five DSDs are available in the Continuation
290 * Type 1 IOCB.
291 */
292 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
293 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
294 avail_dsds = 5;
295 }
296
297 sle_dma = sg_dma_address(sg);
298 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
299 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
300 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
301 avail_dsds--;
302 }
303 }
304
305 /**
306 * qla2x00_start_scsi() - Send a SCSI command to the ISP
307 * @sp: command to send to the ISP
308 *
309 * Returns non-zero if a failure occurred, else zero.
310 */
311 int
312 qla2x00_start_scsi(srb_t *sp)
313 {
314 int ret, nseg;
315 unsigned long flags;
316 scsi_qla_host_t *vha;
317 struct scsi_cmnd *cmd;
318 uint32_t *clr_ptr;
319 uint32_t index;
320 uint32_t handle;
321 cmd_entry_t *cmd_pkt;
322 uint16_t cnt;
323 uint16_t req_cnt;
324 uint16_t tot_dsds;
325 struct device_reg_2xxx __iomem *reg;
326 struct qla_hw_data *ha;
327 struct req_que *req;
328 struct rsp_que *rsp;
329 char tag[2];
330
331 /* Setup device pointers. */
332 ret = 0;
333 vha = sp->fcport->vha;
334 ha = vha->hw;
335 reg = &ha->iobase->isp;
336 cmd = sp->cmd;
337 req = ha->req_q_map[0];
338 rsp = ha->rsp_q_map[0];
339 /* So we know we haven't pci_map'ed anything yet */
340 tot_dsds = 0;
341
342 /* Send marker if required */
343 if (vha->marker_needed != 0) {
344 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
345 QLA_SUCCESS) {
346 return (QLA_FUNCTION_FAILED);
347 }
348 vha->marker_needed = 0;
349 }
350
351 /* Acquire ring specific lock */
352 spin_lock_irqsave(&ha->hardware_lock, flags);
353
354 /* Check for room in outstanding command list. */
355 handle = req->current_outstanding_cmd;
356 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
357 handle++;
358 if (handle == MAX_OUTSTANDING_COMMANDS)
359 handle = 1;
360 if (!req->outstanding_cmds[handle])
361 break;
362 }
363 if (index == MAX_OUTSTANDING_COMMANDS)
364 goto queuing_error;
365
366 /* Map the sg table so we have an accurate count of sg entries needed */
367 if (scsi_sg_count(cmd)) {
368 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
369 scsi_sg_count(cmd), cmd->sc_data_direction);
370 if (unlikely(!nseg))
371 goto queuing_error;
372 } else
373 nseg = 0;
374
375 tot_dsds = nseg;
376
377 /* Calculate the number of request entries needed. */
378 req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
379 if (req->cnt < (req_cnt + 2)) {
380 cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
381 if (req->ring_index < cnt)
382 req->cnt = cnt - req->ring_index;
383 else
384 req->cnt = req->length -
385 (req->ring_index - cnt);
386 }
387 if (req->cnt < (req_cnt + 2))
388 goto queuing_error;
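
/*
 * Free-slot math above: the out pointer is re-read from hardware only
 * when the cached count looks too small.  If the in pointer has not
 * wrapped past the out pointer (ring_index >= cnt), free space is
 * length - (in - out); otherwise it is out - in.  The "+ 2" keeps
 * slack so the in pointer can never catch the out pointer.
 */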
389
390 /* Build command packet */
391 req->current_outstanding_cmd = handle;
392 req->outstanding_cmds[handle] = sp;
393 sp->handle = handle;
394 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
395 req->cnt -= req_cnt;
396
397 cmd_pkt = (cmd_entry_t *)req->ring_ptr;
398 cmd_pkt->handle = handle;
399 /* Zero out remaining portion of packet. */
400 clr_ptr = (uint32_t *)cmd_pkt + 2;
401 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
402 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
403
404 /* Set target ID and LUN number */
405 SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
406 cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);
407
408 /* Update tagged queuing modifier */
409 if (scsi_populate_tag_msg(cmd, tag)) {
410 switch (tag[0]) {
411 case HEAD_OF_QUEUE_TAG:
412 cmd_pkt->control_flags =
413 __constant_cpu_to_le16(CF_HEAD_TAG);
414 break;
415 case ORDERED_QUEUE_TAG:
416 cmd_pkt->control_flags =
417 __constant_cpu_to_le16(CF_ORDERED_TAG);
418 break;
419 default:
420 cmd_pkt->control_flags =
421 __constant_cpu_to_le16(CF_SIMPLE_TAG);
422 break;
423 }
424 }
425
426 /* Load SCSI command packet. */
427 memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
428 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
429
430 /* Build IOCB segments */
431 ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);
432
433 /* Set total data segment count. */
434 cmd_pkt->entry_count = (uint8_t)req_cnt;
435 wmb();
436
437 /* Adjust ring index. */
438 req->ring_index++;
439 if (req->ring_index == req->length) {
440 req->ring_index = 0;
441 req->ring_ptr = req->ring;
442 } else
443 req->ring_ptr++;
444
445 sp->flags |= SRB_DMA_VALID;
446
447 /* Set chip new ring index. */
448 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
449 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
450
451 /* Manage unprocessed RIO/ZIO commands in response queue. */
452 if (vha->flags.process_response_queue &&
453 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
454 qla2x00_process_response_queue(rsp);
455
456 spin_unlock_irqrestore(&ha->hardware_lock, flags);
457 return (QLA_SUCCESS);
458
459 queuing_error:
460 if (tot_dsds)
461 scsi_dma_unmap(cmd);
462
463 spin_unlock_irqrestore(&ha->hardware_lock, flags);
464
465 return (QLA_FUNCTION_FAILED);
466 }
467
468 /**
469 * qla2x00_start_iocbs() - Advance the request ring and notify the ISP.
470 */
471 static void
472 qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
473 {
474 struct qla_hw_data *ha = vha->hw;
475 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
476 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
477
478 if (IS_QLA82XX(ha)) {
479 qla82xx_start_iocbs(vha);
480 } else {
481 /* Adjust ring index. */
482 req->ring_index++;
483 if (req->ring_index == req->length) {
484 req->ring_index = 0;
485 req->ring_ptr = req->ring;
486 } else
487 req->ring_ptr++;
488
489 /* Set chip new ring index. */
490 if (ha->mqenable) {
491 WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
492 RD_REG_DWORD(&ioreg->hccr);
493 } else if (IS_FWI2_CAPABLE(ha)) {
494 WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
495 RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
496 } else {
497 WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
498 req->ring_index);
499 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
500 }
501 }
502 }
503
504 /**
505 * qla2x00_marker() - Send a marker IOCB to the firmware.
506 * @vha: HA context
507 * @loop_id: loop ID
508 * @lun: LUN
509 * @type: marker modifier
510 *
511 * Can be called from both normal and interrupt context.
512 *
513 * Returns non-zero if a failure occurred, else zero.
514 */
515 static int
516 __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
517 struct rsp_que *rsp, uint16_t loop_id,
518 uint16_t lun, uint8_t type)
519 {
520 mrk_entry_t *mrk;
521 struct mrk_entry_24xx *mrk24;
522 struct qla_hw_data *ha = vha->hw;
523 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
524
525 mrk24 = NULL;
526 req = ha->req_q_map[0];
527 mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
528 if (mrk == NULL) {
529 ql_log(ql_log_warn, base_vha, 0x3026,
530 "Failed to allocate Marker IOCB.\n");
531
532 return (QLA_FUNCTION_FAILED);
533 }
534
535 mrk->entry_type = MARKER_TYPE;
536 mrk->modifier = type;
537 if (type != MK_SYNC_ALL) {
538 if (IS_FWI2_CAPABLE(ha)) {
539 mrk24 = (struct mrk_entry_24xx *) mrk;
540 mrk24->nport_handle = cpu_to_le16(loop_id);
541 mrk24->lun[1] = LSB(lun);
542 mrk24->lun[2] = MSB(lun);
543 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
544 mrk24->vp_index = vha->vp_idx;
545 mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
546 } else {
547 SET_TARGET_ID(ha, mrk->target, loop_id);
548 mrk->lun = cpu_to_le16(lun);
549 }
550 }
551 wmb();
552
553 qla2x00_start_iocbs(vha, req);
554
555 return (QLA_SUCCESS);
556 }
557
558 int
559 qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
560 struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
561 uint8_t type)
562 {
563 int ret;
564 unsigned long flags = 0;
565
566 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
567 ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
568 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
569
570 return (ret);
571 }
572
573 /**
574 * qla24xx_calc_iocbs() - Determine number of Command Type 7 and
575 * Continuation Type 1 IOCBs to allocate.
576 *
577 * @dsds: number of data segment descriptors needed
578 *
579 * Returns the number of IOCB entries needed to store @dsds.
580 */
581 inline uint16_t
582 qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
583 {
584 uint16_t iocbs;
585
586 iocbs = 1;
587 if (dsds > 1) {
588 iocbs += (dsds - 1) / 5;
589 if ((dsds - 1) % 5)
590 iocbs++;
591 }
592 return iocbs;
593 }
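
/*
 * For example, dsds = 7 needs 1 + (7 - 1) / 5 + 1 = 3 entries: one DSD
 * in the Command Type 7 IOCB, five in the first Continuation Type 1
 * IOCB, and the last one in a second continuation.
 */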
594
595 static inline int
596 qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
597 uint16_t tot_dsds)
598 {
599 uint32_t *cur_dsd = NULL;
600 scsi_qla_host_t *vha;
601 struct qla_hw_data *ha;
602 struct scsi_cmnd *cmd;
603 struct scatterlist *cur_seg;
604 uint32_t *dsd_seg;
605 void *next_dsd;
606 uint8_t avail_dsds;
607 uint8_t first_iocb = 1;
608 uint32_t dsd_list_len;
609 struct dsd_dma *dsd_ptr;
610 struct ct6_dsd *ctx;
611
612 cmd = sp->cmd;
613
614 /* Update entry type to indicate Command Type 6 IOCB */
615 *((uint32_t *)(&cmd_pkt->entry_type)) =
616 __constant_cpu_to_le32(COMMAND_TYPE_6);
617
618 /* No data transfer */
619 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
620 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
621 return 0;
622 }
623
624 vha = sp->fcport->vha;
625 ha = vha->hw;
626
627 /* Set transfer direction */
628 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
629 cmd_pkt->control_flags =
630 __constant_cpu_to_le16(CF_WRITE_DATA);
631 ha->qla_stats.output_bytes += scsi_bufflen(cmd);
632 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
633 cmd_pkt->control_flags =
634 __constant_cpu_to_le16(CF_READ_DATA);
635 ha->qla_stats.input_bytes += scsi_bufflen(cmd);
636 }
637
638 cur_seg = scsi_sglist(cmd);
639 ctx = sp->ctx;
640
641 while (tot_dsds) {
642 avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
643 QLA_DSDS_PER_IOCB : tot_dsds;
644 tot_dsds -= avail_dsds;
645 dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
646
647 dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
648 struct dsd_dma, list);
649 next_dsd = dsd_ptr->dsd_addr;
650 list_del(&dsd_ptr->list);
651 ha->gbl_dsd_avail--;
652 list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
653 ctx->dsd_use_cnt++;
654 ha->gbl_dsd_inuse++;
655
656 if (first_iocb) {
657 first_iocb = 0;
658 dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
659 *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
660 *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
661 cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
662 } else {
663 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
664 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
665 *cur_dsd++ = cpu_to_le32(dsd_list_len);
666 }
667 cur_dsd = (uint32_t *)next_dsd;
668 while (avail_dsds) {
669 dma_addr_t sle_dma;
670
671 sle_dma = sg_dma_address(cur_seg);
672 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
673 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
674 *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
675 cur_seg = sg_next(cur_seg);
676 avail_dsds--;
677 }
678 }
679
680 /* Null termination */
681 *cur_dsd++ = 0;
682 *cur_dsd++ = 0;
683 *cur_dsd++ = 0;
684 cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
685 return 0;
686 }
687
688 /*
689 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
690 * for Command Type 6.
691 *
692 * @dsds: number of data segment descriptors needed
693 *
694 * Returns the number of DSD lists needed to store @dsds.
695 */
696 inline uint16_t
697 qla24xx_calc_dsd_lists(uint16_t dsds)
698 {
699 uint16_t dsd_lists = 0;
700
701 dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
702 if (dsds % QLA_DSDS_PER_IOCB)
703 dsd_lists++;
704 return dsd_lists;
705 }
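
/*
 * E.g. taking QLA_DSDS_PER_IOCB as 37 (its value in qla_def.h), a
 * 100-segment transfer needs 100 / 37 = 2 full DSD lists plus one
 * partial list for the remaining 26 segments, so this returns 3.
 */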
706
707
708 /**
709 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
710 * IOCB types.
711 *
712 * @sp: SRB command to process
713 * @cmd_pkt: Command type 7 IOCB
714 * @tot_dsds: Total number of segments to transfer
715 */
716 inline void
717 qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
718 uint16_t tot_dsds)
719 {
720 uint16_t avail_dsds;
721 uint32_t *cur_dsd;
722 scsi_qla_host_t *vha;
723 struct scsi_cmnd *cmd;
724 struct scatterlist *sg;
725 int i;
726 struct req_que *req;
727
728 cmd = sp->cmd;
729
730 /* Update entry type to indicate Command Type 7 IOCB */
731 *((uint32_t *)(&cmd_pkt->entry_type)) =
732 __constant_cpu_to_le32(COMMAND_TYPE_7);
733
734 /* No data transfer */
735 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
736 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
737 return;
738 }
739
740 vha = sp->fcport->vha;
741 req = vha->req;
742
743 /* Set transfer direction */
744 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
745 cmd_pkt->task_mgmt_flags =
746 __constant_cpu_to_le16(TMF_WRITE_DATA);
747 sp->fcport->vha->hw->qla_stats.output_bytes +=
748 scsi_bufflen(sp->cmd);
749 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
750 cmd_pkt->task_mgmt_flags =
751 __constant_cpu_to_le16(TMF_READ_DATA);
752 sp->fcport->vha->hw->qla_stats.input_bytes +=
753 scsi_bufflen(sp->cmd);
754 }
755
756 /* One DSD is available in the Command Type 7 IOCB */
757 avail_dsds = 1;
758 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
759
760 /* Load data segments */
761
762 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
763 dma_addr_t sle_dma;
764 cont_a64_entry_t *cont_pkt;
765
766 /* Allocate additional continuation packets? */
767 if (avail_dsds == 0) {
768 /*
769 * Five DSDs are available in the Continuation
770 * Type 1 IOCB.
771 */
772 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
773 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
774 avail_dsds = 5;
775 }
776
777 sle_dma = sg_dma_address(sg);
778 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
779 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
780 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
781 avail_dsds--;
782 }
783 }
784
785 struct fw_dif_context {
786 uint32_t ref_tag;
787 uint16_t app_tag;
788 uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/
789 uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/
790 };
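
/*
 * This mirrors the per-interval T10 DIF tuple the firmware validates:
 * a 4-byte reference tag and a 2-byte application tag, each paired
 * with a per-byte mask (0xff = check/replace that byte, 0x00 = ignore
 * it).
 */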
791
792 /*
793 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI command
794 *
795 */
796 static inline void
797 qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
798 unsigned int protcnt)
799 {
800 struct scsi_cmnd *cmd = sp->cmd;
801 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
802
803 switch (scsi_get_prot_type(cmd)) {
804 case SCSI_PROT_DIF_TYPE0:
805 /*
806 * No check for ql2xenablehba_err_chk, as it would be an
807 * I/O error if hba tag generation is not done.
808 */
809 pkt->ref_tag = cpu_to_le32((uint32_t)
810 (0xffffffff & scsi_get_lba(cmd)));
811
812 if (!qla2x00_hba_err_chk_enabled(sp))
813 break;
814
815 pkt->ref_tag_mask[0] = 0xff;
816 pkt->ref_tag_mask[1] = 0xff;
817 pkt->ref_tag_mask[2] = 0xff;
818 pkt->ref_tag_mask[3] = 0xff;
819 break;
820
821 /*
822 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
823 * match LBA in CDB + N
824 */
825 case SCSI_PROT_DIF_TYPE2:
826 pkt->app_tag = __constant_cpu_to_le16(0);
827 pkt->app_tag_mask[0] = 0x0;
828 pkt->app_tag_mask[1] = 0x0;
829
830 pkt->ref_tag = cpu_to_le32((uint32_t)
831 (0xffffffff & scsi_get_lba(cmd)));
832
833 if (!qla2x00_hba_err_chk_enabled(sp))
834 break;
835
836 /* enable ALL bytes of the ref tag */
837 pkt->ref_tag_mask[0] = 0xff;
838 pkt->ref_tag_mask[1] = 0xff;
839 pkt->ref_tag_mask[2] = 0xff;
840 pkt->ref_tag_mask[3] = 0xff;
841 break;
842
843 /* For Type 3 protection: 16 bit GUARD only */
844 case SCSI_PROT_DIF_TYPE3:
845 pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
846 pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
847 0x00;
848 break;
849
850 /*
851 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
852 * 16 bit app tag.
853 */
854 case SCSI_PROT_DIF_TYPE1:
855 pkt->ref_tag = cpu_to_le32((uint32_t)
856 (0xffffffff & scsi_get_lba(cmd)));
857 pkt->app_tag = __constant_cpu_to_le16(0);
858 pkt->app_tag_mask[0] = 0x0;
859 pkt->app_tag_mask[1] = 0x0;
860
861 if (!qla2x00_hba_err_chk_enabled(sp))
862 break;
863
864 /* enable ALL bytes of the ref tag */
865 pkt->ref_tag_mask[0] = 0xff;
866 pkt->ref_tag_mask[1] = 0xff;
867 pkt->ref_tag_mask[2] = 0xff;
868 pkt->ref_tag_mask[3] = 0xff;
869 break;
870 }
871
872 ql_dbg(ql_dbg_io, vha, 0x3009,
873 "Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, "
874 "prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n",
875 pkt->ref_tag, pkt->app_tag, protcnt, (int)scsi_get_lba(cmd),
876 scsi_get_prot_type(cmd), cmd);
877 }
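
/*
 * Summary of the cases above: Type 0 checks only the reference tag,
 * Types 1 and 2 check the reference tag against the LBA with the app
 * tag masked off, and Type 3 masks the reference tag entirely so only
 * the 16-bit guard is verified.
 */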
878
879 struct qla2_sgx {
880 dma_addr_t dma_addr; /* OUT */
881 uint32_t dma_len; /* OUT */
882
883 uint32_t tot_bytes; /* IN */
884 struct scatterlist *cur_sg; /* IN */
885
886 /* for bookkeeping, bzero on initial invocation */
887 uint32_t bytes_consumed;
888 uint32_t num_bytes;
889 uint32_t tot_partial;
890
891 /* for debugging */
892 uint32_t num_sg;
893 srb_t *sp;
894 };
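
/*
 * Typical use, as a minimal sketch: zero the state, seed tot_bytes and
 * cur_sg from the command, then iterate.  Each successful call yields
 * one dma_addr/dma_len pair that never crosses a protection-interval
 * boundary:
 *
 *	struct qla2_sgx sgx;
 *	uint32_t partial;
 *
 *	memset(&sgx, 0, sizeof(sgx));
 *	sgx.tot_bytes = scsi_bufflen(cmd);
 *	sgx.cur_sg = scsi_sglist(cmd);
 *	while (qla24xx_get_one_block_sg(blk_sz, &sgx, &partial))
 *		consume(sgx.dma_addr, sgx.dma_len);
 *
 * where consume() stands in for whatever builds the DSD entries.
 */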
895
896 static int
897 qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
898 uint32_t *partial)
899 {
900 struct scatterlist *sg;
901 uint32_t cumulative_partial, sg_len;
902 dma_addr_t sg_dma_addr;
903
904 if (sgx->num_bytes == sgx->tot_bytes)
905 return 0;
906
907 sg = sgx->cur_sg;
908 cumulative_partial = sgx->tot_partial;
909
910 sg_dma_addr = sg_dma_address(sg);
911 sg_len = sg_dma_len(sg);
912
913 sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;
914
915 if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
916 sgx->dma_len = (blk_sz - cumulative_partial);
917 sgx->tot_partial = 0;
918 sgx->num_bytes += blk_sz;
919 *partial = 0;
920 } else {
921 sgx->dma_len = sg_len - sgx->bytes_consumed;
922 sgx->tot_partial += sgx->dma_len;
923 *partial = 1;
924 }
925
926 sgx->bytes_consumed += sgx->dma_len;
927
928 if (sg_len == sgx->bytes_consumed) {
929 sg = sg_next(sg);
930 sgx->num_sg++;
931 sgx->cur_sg = sg;
932 sgx->bytes_consumed = 0;
933 }
934
935 return 1;
936 }
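
/*
 * On return, *partial == 0 means a whole blk_sz protection interval
 * has been emitted and the caller may attach its 8-byte DIF tuple;
 * *partial == 1 means the interval is still split across scatterlist
 * elements and further calls are needed to complete it.
 */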
937
938 static int
939 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
940 uint32_t *dsd, uint16_t tot_dsds)
941 {
942 void *next_dsd;
943 uint8_t avail_dsds = 0;
944 uint32_t dsd_list_len;
945 struct dsd_dma *dsd_ptr;
946 struct scatterlist *sg_prot;
947 uint32_t *cur_dsd = dsd;
948 uint16_t used_dsds = tot_dsds;
949
950 uint32_t prot_int;
951 uint32_t partial;
952 struct qla2_sgx sgx;
953 dma_addr_t sle_dma;
954 uint32_t sle_dma_len, tot_prot_dma_len = 0;
955 struct scsi_cmnd *cmd = sp->cmd;
956
957 prot_int = cmd->device->sector_size;
958
959 memset(&sgx, 0, sizeof(struct qla2_sgx));
960 sgx.tot_bytes = scsi_bufflen(sp->cmd);
961 sgx.cur_sg = scsi_sglist(sp->cmd);
962 sgx.sp = sp;
963
964 sg_prot = scsi_prot_sglist(sp->cmd);
965
966 while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
967
968 sle_dma = sgx.dma_addr;
969 sle_dma_len = sgx.dma_len;
970 alloc_and_fill:
971 /* Allocate additional continuation packets? */
972 if (avail_dsds == 0) {
973 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
974 QLA_DSDS_PER_IOCB : used_dsds;
975 dsd_list_len = (avail_dsds + 1) * 12;
976 used_dsds -= avail_dsds;
977
978 /* allocate tracking DS */
979 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
980 if (!dsd_ptr)
981 return 1;
982
983 /* allocate new list */
984 dsd_ptr->dsd_addr = next_dsd =
985 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
986 &dsd_ptr->dsd_list_dma);
987
988 if (!next_dsd) {
989 /*
990 * Need to cleanup only this dsd_ptr, rest
991 * will be done by sp_free_dma()
992 */
993 kfree(dsd_ptr);
994 return 1;
995 }
996
997 list_add_tail(&dsd_ptr->list,
998 &((struct crc_context *)sp->ctx)->dsd_list);
999
1000 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1001
1002 /* add new list to cmd iocb or last list */
1003 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1004 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1005 *cur_dsd++ = dsd_list_len;
1006 cur_dsd = (uint32_t *)next_dsd;
1007 }
1008 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1009 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1010 *cur_dsd++ = cpu_to_le32(sle_dma_len);
1011 avail_dsds--;
1012
1013 if (partial == 0) {
1014 /* Got a full protection interval */
1015 sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
1016 sle_dma_len = 8;
1017
1018 tot_prot_dma_len += sle_dma_len;
1019 if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
1020 tot_prot_dma_len = 0;
1021 sg_prot = sg_next(sg_prot);
1022 }
1023
1024 partial = 1; /* So as to not re-enter this block */
1025 goto alloc_and_fill;
1026 }
1027 }
1028 /* Null termination */
1029 *cur_dsd++ = 0;
1030 *cur_dsd++ = 0;
1031 *cur_dsd++ = 0;
1032 return 0;
1033 }
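
/*
 * Note the interleaving above: after each completed data interval the
 * walk loops back once through alloc_and_fill to emit an 8-byte DSD
 * taken from the protection scatterlist, so data and DIF tuples
 * alternate within a single list instead of using a separate DIF
 * bundle.
 */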
1034
1035 static int
1036 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
1037 uint16_t tot_dsds)
1038 {
1039 void *next_dsd;
1040 uint8_t avail_dsds = 0;
1041 uint32_t dsd_list_len;
1042 struct dsd_dma *dsd_ptr;
1043 struct scatterlist *sg;
1044 uint32_t *cur_dsd = dsd;
1045 int i;
1046 uint16_t used_dsds = tot_dsds;
1047 scsi_qla_host_t *vha = shost_priv(sp->cmd->device->host);
1048
1049 uint8_t *cp;
1050
1051 scsi_for_each_sg(sp->cmd, sg, tot_dsds, i) {
1052 dma_addr_t sle_dma;
1053
1054 /* Allocate additional continuation packets? */
1055 if (avail_dsds == 0) {
1056 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1057 QLA_DSDS_PER_IOCB : used_dsds;
1058 dsd_list_len = (avail_dsds + 1) * 12;
1059 used_dsds -= avail_dsds;
1060
1061 /* allocate tracking DS */
1062 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
1063 if (!dsd_ptr)
1064 return 1;
1065
1066 /* allocate new list */
1067 dsd_ptr->dsd_addr = next_dsd =
1068 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1069 &dsd_ptr->dsd_list_dma);
1070
1071 if (!next_dsd) {
1072 /*
1073 * Need to cleanup only this dsd_ptr, rest
1074 * will be done by sp_free_dma()
1075 */
1076 kfree(dsd_ptr);
1077 return 1;
1078 }
1079
1080 list_add_tail(&dsd_ptr->list,
1081 &((struct crc_context *)sp->ctx)->dsd_list);
1082
1083 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1084
1085 /* add new list to cmd iocb or last list */
1086 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1087 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1088 *cur_dsd++ = dsd_list_len;
1089 cur_dsd = (uint32_t *)next_dsd;
1090 }
1091 sle_dma = sg_dma_address(sg);
1092 ql_dbg(ql_dbg_io, vha, 0x300a,
1093 "sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n",
1094 i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg),
1095 sp->cmd);
1096 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1097 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1098 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1099 avail_dsds--;
1100
1101 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
1102 cp = page_address(sg_page(sg)) + sg->offset;
1103 ql_dbg(ql_dbg_io, vha, 0x300b,
1104 "User data buffer=%p for cmd=%p.\n", cp, sp->cmd);
1105 }
1106 }
1107 /* Null termination */
1108 *cur_dsd++ = 0;
1109 *cur_dsd++ = 0;
1110 *cur_dsd++ = 0;
1111 return 0;
1112 }
1113
1114 static int
1115 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1116 uint32_t *dsd,
1117 uint16_t tot_dsds)
1118 {
1119 void *next_dsd;
1120 uint8_t avail_dsds = 0;
1121 uint32_t dsd_list_len;
1122 struct dsd_dma *dsd_ptr;
1123 struct scatterlist *sg;
1124 int i;
1125 struct scsi_cmnd *cmd;
1126 uint32_t *cur_dsd = dsd;
1127 uint16_t used_dsds = tot_dsds;
1128 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1129 uint8_t *cp;
1130
1131
1132 cmd = sp->cmd;
1133 scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
1134 dma_addr_t sle_dma;
1135
1136 /* Allocate additional continuation packets? */
1137 if (avail_dsds == 0) {
1138 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1139 QLA_DSDS_PER_IOCB : used_dsds;
1140 dsd_list_len = (avail_dsds + 1) * 12;
1141 used_dsds -= avail_dsds;
1142
1143 /* allocate tracking DS */
1144 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
1145 if (!dsd_ptr)
1146 return 1;
1147
1148 /* allocate new list */
1149 dsd_ptr->dsd_addr = next_dsd =
1150 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1151 &dsd_ptr->dsd_list_dma);
1152
1153 if (!next_dsd) {
1154 /*
1155 * Need to cleanup only this dsd_ptr, rest
1156 * will be done by sp_free_dma()
1157 */
1158 kfree(dsd_ptr);
1159 return 1;
1160 }
1161
1162 list_add_tail(&dsd_ptr->list,
1163 &((struct crc_context *)sp->ctx)->dsd_list);
1164
1165 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1166
1167 /* add new list to cmd iocb or last list */
1168 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1169 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1170 *cur_dsd++ = dsd_list_len;
1171 cur_dsd = (uint32_t *)next_dsd;
1172 }
1173 sle_dma = sg_dma_address(sg);
1174 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
1175 ql_dbg(ql_dbg_io, vha, 0x3027,
1176 "%s(): %p, sg_entry %d - "
1177 "addr=0x%x 0x%x, len=%d.\n",
1178 __func__, cur_dsd, i,
1179 LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg));
1180 }
1181 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1182 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1183 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1184
1185 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
1186 cp = page_address(sg_page(sg)) + sg->offset;
1187 ql_dbg(ql_dbg_io, vha, 0x3028,
1188 "%s(): Protection Data buffer = %p.\n", __func__,
1189 cp);
1190 }
1191 avail_dsds--;
1192 }
1193 /* Null termination */
1194 *cur_dsd++ = 0;
1195 *cur_dsd++ = 0;
1196 *cur_dsd++ = 0;
1197 return 0;
1198 }
1199
1200 /**
1201 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
1202 * Type CRC_2 IOCB types.
1203 *
1204 * @sp: SRB command to process
1205 * @cmd_pkt: Command type CRC_2 IOCB
1206 * @tot_dsds: Total number of data and protection segments to transfer
1207 */
1208 static inline int
1209 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1210 uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
1211 {
1212 uint32_t *cur_dsd, *fcp_dl;
1213 scsi_qla_host_t *vha;
1214 struct scsi_cmnd *cmd;
1215 struct scatterlist *cur_seg;
1216 int sgc;
1217 uint32_t total_bytes = 0;
1218 uint32_t data_bytes;
1219 uint32_t dif_bytes;
1220 uint8_t bundling = 1;
1221 uint16_t blk_size;
1222 uint8_t *clr_ptr;
1223 struct crc_context *crc_ctx_pkt = NULL;
1224 struct qla_hw_data *ha;
1225 uint8_t additional_fcpcdb_len;
1226 uint16_t fcp_cmnd_len;
1227 struct fcp_cmnd *fcp_cmnd;
1228 dma_addr_t crc_ctx_dma;
1229 char tag[2];
1230
1231 cmd = sp->cmd;
1232
1233 sgc = 0;
1234 /* Update entry type to indicate Command Type CRC_2 IOCB */
1235 *((uint32_t *)(&cmd_pkt->entry_type)) =
1236 __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);
1237
1238 vha = sp->fcport->vha;
1239 ha = vha->hw;
1240
1241 /* No data transfer */
1242 data_bytes = scsi_bufflen(cmd);
1243 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1244 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
1245 return QLA_SUCCESS;
1246 }
1247
1248 cmd_pkt->vp_index = sp->fcport->vp_idx;
1249
1250 /* Set transfer direction */
1251 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1252 cmd_pkt->control_flags =
1253 __constant_cpu_to_le16(CF_WRITE_DATA);
1254 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1255 cmd_pkt->control_flags =
1256 __constant_cpu_to_le16(CF_READ_DATA);
1257 }
1258
1259 if ((scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_INSERT) ||
1260 (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_STRIP) ||
1261 (scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_STRIP) ||
1262 (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_INSERT))
1263 bundling = 0;
1264
1265 /* Allocate CRC context from global pool */
1266 crc_ctx_pkt = sp->ctx = dma_pool_alloc(ha->dl_dma_pool,
1267 GFP_ATOMIC, &crc_ctx_dma);
1268
1269 if (!crc_ctx_pkt)
1270 goto crc_queuing_error;
1271
1272 /* Zero out CTX area. */
1273 clr_ptr = (uint8_t *)crc_ctx_pkt;
1274 memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
1275
1276 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
1277
1278 sp->flags |= SRB_CRC_CTX_DMA_VALID;
1279
1280 /* Set handle */
1281 crc_ctx_pkt->handle = cmd_pkt->handle;
1282
1283 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1284
1285 qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
1286 &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1287
1288 cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
1289 cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
1290 cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
1291
1292 /* Determine SCSI command length -- align to 4 byte boundary */
1293 if (cmd->cmd_len > 16) {
1294 additional_fcpcdb_len = cmd->cmd_len - 16;
1295 if ((cmd->cmd_len % 4) != 0) {
1296 /* SCSI cmd > 16 bytes must be multiple of 4 */
1297 goto crc_queuing_error;
1298 }
1299 fcp_cmnd_len = 12 + cmd->cmd_len + 4;
1300 } else {
1301 additional_fcpcdb_len = 0;
1302 fcp_cmnd_len = 12 + 16 + 4;
1303 }
1304
1305 fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
1306
1307 fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
1308 if (cmd->sc_data_direction == DMA_TO_DEVICE)
1309 fcp_cmnd->additional_cdb_len |= 1;
1310 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1311 fcp_cmnd->additional_cdb_len |= 2;
1312
1313 int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun);
1314 memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1315 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1316 cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
1317 LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1318 cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
1319 MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1320 fcp_cmnd->task_management = 0;
1321
1322 /*
1323 * Update tagged queuing modifier if using command tag queuing
1324 */
1325 if (scsi_populate_tag_msg(cmd, tag)) {
1326 switch (tag[0]) {
1327 case HEAD_OF_QUEUE_TAG:
1328 fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
1329 break;
1330 case ORDERED_QUEUE_TAG:
1331 fcp_cmnd->task_attribute = TSK_ORDERED;
1332 break;
1333 default:
1334 fcp_cmnd->task_attribute = 0;
1335 break;
1336 }
1337 } else {
1338 fcp_cmnd->task_attribute = 0;
1339 }
1340
1341 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1342
1343 /* Compute dif len and adjust data len to include protection */
1344 dif_bytes = 0;
1345 blk_size = cmd->device->sector_size;
1346 dif_bytes = (data_bytes / blk_size) * 8;
1347
1348 switch (scsi_get_prot_op(sp->cmd)) {
1349 case SCSI_PROT_READ_INSERT:
1350 case SCSI_PROT_WRITE_STRIP:
1351 total_bytes = data_bytes;
1352 data_bytes += dif_bytes;
1353 break;
1354
1355 case SCSI_PROT_READ_STRIP:
1356 case SCSI_PROT_WRITE_INSERT:
1357 case SCSI_PROT_READ_PASS:
1358 case SCSI_PROT_WRITE_PASS:
1359 total_bytes = data_bytes + dif_bytes;
1360 break;
1361 default:
1362 BUG();
1363 }
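
/*
 * Worked example: a 64 KiB write on 512-byte sectors covers 128
 * protection intervals, so dif_bytes = 128 * 8 = 1024.  For WRITE_PASS
 * the fabric carries total_bytes = 65536 + 1024; for WRITE_STRIP only
 * 65536 bytes go on the wire, while data_bytes grows by dif_bytes to
 * account for the host-side DIF tuples the HBA still fetches.
 */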
1364
1365 if (!qla2x00_hba_err_chk_enabled(sp))
1366 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1367
1368 if (!bundling) {
1369 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
1370 } else {
1371 /*
1372 * Configure bundling if we need to fetch interleaving
1373 * protection data with separate PCI accesses.
1374 */
1375 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1376 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1377 crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1378 tot_prot_dsds);
1379 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
1380 }
1381
1382 /* Finish the common fields of CRC pkt */
1383 crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1384 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1385 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
1386 crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
1387 /* Fibre channel byte count */
1388 cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1389 fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1390 additional_fcpcdb_len);
1391 *fcp_dl = htonl(total_bytes);
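
/*
 * fcp_dl is the big-endian FCP_DL field that follows the (possibly
 * extended) CDB in the FCP_CMND payload, hence the htonl() here rather
 * than the cpu_to_le32() used for the IOCB fields above.
 */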
1392
1393 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1394 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
1395 return QLA_SUCCESS;
1396 }
1397 /* Walks data segments */
1398
1399 cmd_pkt->control_flags |=
1400 __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1401
1402 if (!bundling && tot_prot_dsds) {
1403 if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
1404 cur_dsd, tot_dsds))
1405 goto crc_queuing_error;
1406 } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1407 (tot_dsds - tot_prot_dsds)))
1408 goto crc_queuing_error;
1409
1410 if (bundling && tot_prot_dsds) {
1411 /* Walks dif segments */
1412 cur_seg = scsi_prot_sglist(cmd);
1413 cmd_pkt->control_flags |=
1414 __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1415 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
1416 if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1417 tot_prot_dsds))
1418 goto crc_queuing_error;
1419 }
1420 return QLA_SUCCESS;
1421
1422 crc_queuing_error:
1423 /* Cleanup will be performed by the caller */
1424
1425 return QLA_FUNCTION_FAILED;
1426 }
1427
1428 /**
1429 * qla24xx_start_scsi() - Send a SCSI command to the ISP
1430 * @sp: command to send to the ISP
1431 *
1432 * Returns non-zero if a failure occurred, else zero.
1433 */
1434 int
1435 qla24xx_start_scsi(srb_t *sp)
1436 {
1437 int ret, nseg;
1438 unsigned long flags;
1439 uint32_t *clr_ptr;
1440 uint32_t index;
1441 uint32_t handle;
1442 struct cmd_type_7 *cmd_pkt;
1443 uint16_t cnt;
1444 uint16_t req_cnt;
1445 uint16_t tot_dsds;
1446 struct req_que *req = NULL;
1447 struct rsp_que *rsp = NULL;
1448 struct scsi_cmnd *cmd = sp->cmd;
1449 struct scsi_qla_host *vha = sp->fcport->vha;
1450 struct qla_hw_data *ha = vha->hw;
1451 char tag[2];
1452
1453 /* Setup device pointers. */
1454 ret = 0;
1455
1456 qla25xx_set_que(sp, &rsp);
1457 req = vha->req;
1458
1459 /* So we know we haven't pci_map'ed anything yet */
1460 tot_dsds = 0;
1461
1462 /* Send marker if required */
1463 if (vha->marker_needed != 0) {
1464 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1465 QLA_SUCCESS)
1466 return QLA_FUNCTION_FAILED;
1467 vha->marker_needed = 0;
1468 }
1469
1470 /* Acquire ring specific lock */
1471 spin_lock_irqsave(&ha->hardware_lock, flags);
1472
1473 /* Check for room in outstanding command list. */
1474 handle = req->current_outstanding_cmd;
1475 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1476 handle++;
1477 if (handle == MAX_OUTSTANDING_COMMANDS)
1478 handle = 1;
1479 if (!req->outstanding_cmds[handle])
1480 break;
1481 }
1482 if (index == MAX_OUTSTANDING_COMMANDS) {
1483 goto queuing_error;
1484 }
1485
1486 /* Map the sg table so we have an accurate count of sg entries needed */
1487 if (scsi_sg_count(cmd)) {
1488 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1489 scsi_sg_count(cmd), cmd->sc_data_direction);
1490 if (unlikely(!nseg))
1491 goto queuing_error;
1492 } else
1493 nseg = 0;
1494
1495 tot_dsds = nseg;
1496 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1497 if (req->cnt < (req_cnt + 2)) {
1498 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1499
1500 if (req->ring_index < cnt)
1501 req->cnt = cnt - req->ring_index;
1502 else
1503 req->cnt = req->length -
1504 (req->ring_index - cnt);
1505 }
1506 if (req->cnt < (req_cnt + 2))
1507 goto queuing_error;
1508
1509 /* Build command packet. */
1510 req->current_outstanding_cmd = handle;
1511 req->outstanding_cmds[handle] = sp;
1512 sp->handle = handle;
1513 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1514 req->cnt -= req_cnt;
1515
1516 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1517 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1518
1519 /* Zero out remaining portion of packet. */
1520 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
1521 clr_ptr = (uint32_t *)cmd_pkt + 2;
1522 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1523 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1524
1525 /* Set NPORT-ID and LUN number */
1526 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1527 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1528 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1529 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1530 cmd_pkt->vp_index = sp->fcport->vp_idx;
1531
1532 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
1533 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1534
1535 /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
1536 if (scsi_populate_tag_msg(cmd, tag)) {
1537 switch (tag[0]) {
1538 case HEAD_OF_QUEUE_TAG:
1539 cmd_pkt->task = TSK_HEAD_OF_QUEUE;
1540 break;
1541 case ORDERED_QUEUE_TAG:
1542 cmd_pkt->task = TSK_ORDERED;
1543 break;
1544 }
1545 }
1546
1547 /* Load SCSI command packet. */
1548 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1549 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1550
1551 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1552
1553 /* Build IOCB segments */
1554 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
1555
1556 /* Set total data segment count. */
1557 cmd_pkt->entry_count = (uint8_t)req_cnt;
1558 /* Specify response queue number where completion should happen */
1559 cmd_pkt->entry_status = (uint8_t) rsp->id;
1560 wmb();
1561 /* Adjust ring index. */
1562 req->ring_index++;
1563 if (req->ring_index == req->length) {
1564 req->ring_index = 0;
1565 req->ring_ptr = req->ring;
1566 } else
1567 req->ring_ptr++;
1568
1569 sp->flags |= SRB_DMA_VALID;
1570
1571 /* Set chip new ring index. */
1572 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1573 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1574
1575 /* Manage unprocessed RIO/ZIO commands in response queue. */
1576 if (vha->flags.process_response_queue &&
1577 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1578 qla24xx_process_response_queue(vha, rsp);
1579
1580 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1581 return QLA_SUCCESS;
1582
1583 queuing_error:
1584 if (tot_dsds)
1585 scsi_dma_unmap(cmd);
1586
1587 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1588
1589 return QLA_FUNCTION_FAILED;
1590 }
1591
1592
1593 /**
1594 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1595 * @sp: command to send to the ISP
1596 *
1597 * Returns non-zero if a failure occurred, else zero.
1598 */
1599 int
1600 qla24xx_dif_start_scsi(srb_t *sp)
1601 {
1602 int nseg;
1603 unsigned long flags;
1604 uint32_t *clr_ptr;
1605 uint32_t index;
1606 uint32_t handle;
1607 uint16_t cnt;
1608 uint16_t req_cnt = 0;
1609 uint16_t tot_dsds;
1610 uint16_t tot_prot_dsds;
1611 uint16_t fw_prot_opts = 0;
1612 struct req_que *req = NULL;
1613 struct rsp_que *rsp = NULL;
1614 struct scsi_cmnd *cmd = sp->cmd;
1615 struct scsi_qla_host *vha = sp->fcport->vha;
1616 struct qla_hw_data *ha = vha->hw;
1617 struct cmd_type_crc_2 *cmd_pkt;
1618 uint32_t status = 0;
1619
1620 #define QDSS_GOT_Q_SPACE BIT_0
1621
1622 /* Only process protection or >16 byte CDBs in this routine */
1623 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1624 if (cmd->cmd_len <= 16)
1625 return qla24xx_start_scsi(sp);
1626 }
1627
1628 /* Setup device pointers. */
1629
1630 qla25xx_set_que(sp, &rsp);
1631 req = vha->req;
1632
1633 /* So we know we haven't pci_map'ed anything yet */
1634 tot_dsds = 0;
1635
1636 /* Send marker if required */
1637 if (vha->marker_needed != 0) {
1638 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1639 QLA_SUCCESS)
1640 return QLA_FUNCTION_FAILED;
1641 vha->marker_needed = 0;
1642 }
1643
1644 /* Acquire ring specific lock */
1645 spin_lock_irqsave(&ha->hardware_lock, flags);
1646
1647 /* Check for room in outstanding command list. */
1648 handle = req->current_outstanding_cmd;
1649 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1650 handle++;
1651 if (handle == MAX_OUTSTANDING_COMMANDS)
1652 handle = 1;
1653 if (!req->outstanding_cmds[handle])
1654 break;
1655 }
1656
1657 if (index == MAX_OUTSTANDING_COMMANDS)
1658 goto queuing_error;
1659
1660 /* Compute number of required data segments */
1661 /* Map the sg table so we have an accurate count of sg entries needed */
1662 if (scsi_sg_count(cmd)) {
1663 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1664 scsi_sg_count(cmd), cmd->sc_data_direction);
1665 if (unlikely(!nseg))
1666 goto queuing_error;
1667 else
1668 sp->flags |= SRB_DMA_VALID;
1669
1670 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1671 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1672 struct qla2_sgx sgx;
1673 uint32_t partial;
1674
1675 memset(&sgx, 0, sizeof(struct qla2_sgx));
1676 sgx.tot_bytes = scsi_bufflen(cmd);
1677 sgx.cur_sg = scsi_sglist(cmd);
1678 sgx.sp = sp;
1679
1680 nseg = 0;
1681 while (qla24xx_get_one_block_sg(
1682 cmd->device->sector_size, &sgx, &partial))
1683 nseg++;
1684 }
1685 } else
1686 nseg = 0;
1687
1688 /* number of required data segments */
1689 tot_dsds = nseg;
1690
1691 /* Compute number of required protection segments */
1692 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1693 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1694 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1695 if (unlikely(!nseg))
1696 goto queuing_error;
1697 else
1698 sp->flags |= SRB_CRC_PROT_DMA_VALID;
1699
1700 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1701 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1702 nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1703 }
1704 } else {
1705 nseg = 0;
1706 }
1707
1708 req_cnt = 1;
1709 /* Total Data and protection sg segment(s) */
1710 tot_prot_dsds = nseg;
1711 tot_dsds += nseg;
1712 if (req->cnt < (req_cnt + 2)) {
1713 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1714
1715 if (req->ring_index < cnt)
1716 req->cnt = cnt - req->ring_index;
1717 else
1718 req->cnt = req->length -
1719 (req->ring_index - cnt);
1720 }
1721
1722 if (req->cnt < (req_cnt + 2))
1723 goto queuing_error;
1724
1725 status |= QDSS_GOT_Q_SPACE;
1726
1727 /* Build header part of command packet (excluding the OPCODE). */
1728 req->current_outstanding_cmd = handle;
1729 req->outstanding_cmds[handle] = sp;
1730 sp->handle = handle;
1731 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1732 req->cnt -= req_cnt;
1733
1734 /* Fill-in common area */
1735 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1736 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1737
1738 clr_ptr = (uint32_t *)cmd_pkt + 2;
1739 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1740
1741 /* Set NPORT-ID and LUN number */
1742 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1743 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1744 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1745 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1746
1747 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
1748 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1749
1750 /* Total Data and protection segment(s) */
1751 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1752
1753 /* Build IOCB segments and adjust for data protection segments */
1754 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1755 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1756 QLA_SUCCESS)
1757 goto queuing_error;
1758
1759 cmd_pkt->entry_count = (uint8_t)req_cnt;
1760 /* Specify response queue number where completion should happen */
1761 cmd_pkt->entry_status = (uint8_t) rsp->id;
1762 cmd_pkt->timeout = __constant_cpu_to_le16(0);
1763 wmb();
1764
1765 /* Adjust ring index. */
1766 req->ring_index++;
1767 if (req->ring_index == req->length) {
1768 req->ring_index = 0;
1769 req->ring_ptr = req->ring;
1770 } else
1771 req->ring_ptr++;
1772
1773 /* Set chip new ring index. */
1774 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1775 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1776
1777 /* Manage unprocessed RIO/ZIO commands in response queue. */
1778 if (vha->flags.process_response_queue &&
1779 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1780 qla24xx_process_response_queue(vha, rsp);
1781
1782 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1783
1784 return QLA_SUCCESS;
1785
1786 queuing_error:
1787 if (status & QDSS_GOT_Q_SPACE) {
1788 req->outstanding_cmds[handle] = NULL;
1789 req->cnt += req_cnt;
1790 }
1791 /* Cleanup will be performed by the caller (queuecommand) */
1792
1793 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1794 return QLA_FUNCTION_FAILED;
1795 }
1796
1797
1798 static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
1799 {
1800 struct scsi_cmnd *cmd = sp->cmd;
1801 struct qla_hw_data *ha = sp->fcport->vha->hw;
1802 int affinity = cmd->request->cpu;
1803
1804 if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
1805 affinity < ha->max_rsp_queues - 1)
1806 *rsp = ha->rsp_q_map[affinity + 1];
1807 else
1808 *rsp = ha->rsp_q_map[0];
1809 }
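
/*
 * For example, with cpu_affinity_enabled set and max_rsp_queues == 5,
 * commands issued on CPUs 0-3 complete on response queues 1-4
 * respectively; any other CPU falls back to the default queue 0.
 */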
1810
1811 /* Generic Control-SRB manipulation functions. */
1812 void *
1813 qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1814 {
1815 struct qla_hw_data *ha = vha->hw;
1816 struct req_que *req = ha->req_q_map[0];
1817 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1818 uint32_t index, handle;
1819 request_t *pkt;
1820 uint16_t cnt, req_cnt;
1821
1822 pkt = NULL;
1823 req_cnt = 1;
1824 handle = 0;
1825
1826 if (!sp)
1827 goto skip_cmd_array;
1828
1829 /* Check for room in outstanding command list. */
1830 handle = req->current_outstanding_cmd;
1831 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1832 handle++;
1833 if (handle == MAX_OUTSTANDING_COMMANDS)
1834 handle = 1;
1835 if (!req->outstanding_cmds[handle])
1836 break;
1837 }
1838 if (index == MAX_OUTSTANDING_COMMANDS) {
1839 ql_log(ql_log_warn, vha, 0x700b,
1840 "No room on outstanding cmd array.\n");
1841 goto queuing_error;
1842 }
1843
1844 /* Prep command array. */
1845 req->current_outstanding_cmd = handle;
1846 req->outstanding_cmds[handle] = sp;
1847 sp->handle = handle;
1848
1849 skip_cmd_array:
1850 /* Check for room on request queue. */
1851 if (req->cnt < req_cnt) {
1852 if (ha->mqenable)
1853 cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
1854 else if (IS_QLA82XX(ha))
1855 cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
1856 else if (IS_FWI2_CAPABLE(ha))
1857 cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
1858 else
1859 cnt = qla2x00_debounce_register(
1860 ISP_REQ_Q_OUT(ha, &reg->isp));
1861
1862 if (req->ring_index < cnt)
1863 req->cnt = cnt - req->ring_index;
1864 else
1865 req->cnt = req->length -
1866 (req->ring_index - cnt);
1867 }
1868 if (req->cnt < req_cnt)
1869 goto queuing_error;
1870
1871 /* Prep packet */
1872 req->cnt -= req_cnt;
1873 pkt = req->ring_ptr;
1874 memset(pkt, 0, REQUEST_ENTRY_SIZE);
1875 pkt->entry_count = req_cnt;
1876 pkt->handle = handle;
1877
1878 queuing_error:
1879 return pkt;
1880 }
1881
1882 static void
1883 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1884 {
1885 struct srb_ctx *ctx = sp->ctx;
1886 struct srb_iocb *lio = ctx->u.iocb_cmd;
1887
1888 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1889 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
1890 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
1891 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
1892 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
1893 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
1894 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1895 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1896 logio->port_id[1] = sp->fcport->d_id.b.area;
1897 logio->port_id[2] = sp->fcport->d_id.b.domain;
1898 logio->vp_index = sp->fcport->vp_idx;
1899 }
1900
1901 static void
1902 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1903 {
1904 struct qla_hw_data *ha = sp->fcport->vha->hw;
1905 struct srb_ctx *ctx = sp->ctx;
1906 struct srb_iocb *lio = ctx->u.iocb_cmd;
1907 uint16_t opts;
1908
1909 mbx->entry_type = MBX_IOCB_TYPE;
1910 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1911 mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
1912 opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
1913 opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
1914 if (HAS_EXTENDED_IDS(ha)) {
1915 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1916 mbx->mb10 = cpu_to_le16(opts);
1917 } else {
1918 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
1919 }
1920 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1921 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1922 sp->fcport->d_id.b.al_pa);
1923 mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1924 }
1925
1926 static void
1927 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1928 {
1929 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1930 logio->control_flags =
1931 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1932 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1933 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1934 logio->port_id[1] = sp->fcport->d_id.b.area;
1935 logio->port_id[2] = sp->fcport->d_id.b.domain;
1936 logio->vp_index = sp->fcport->vp_idx;
1937 }
1938
1939 static void
1940 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
1941 {
1942 struct qla_hw_data *ha = sp->fcport->vha->hw;
1943
1944 mbx->entry_type = MBX_IOCB_TYPE;
1945 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1946 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
1947 mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
1948 cpu_to_le16(sp->fcport->loop_id):
1949 cpu_to_le16(sp->fcport->loop_id << 8);
1950 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1951 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1952 sp->fcport->d_id.b.al_pa);
1953 mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1954 /* Implicit: mbx->mb10 = 0. */
1955 }
1956
1957 static void
1958 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1959 {
1960 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1961 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
1962 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1963 logio->vp_index = sp->fcport->vp_idx;
1964 }
1965
1966 static void
1967 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
1968 {
1969 struct qla_hw_data *ha = sp->fcport->vha->hw;
1970
1971 mbx->entry_type = MBX_IOCB_TYPE;
1972 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1973 mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
1974 if (HAS_EXTENDED_IDS(ha)) {
1975 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1976 mbx->mb10 = cpu_to_le16(BIT_0);
1977 } else {
1978 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
1979 }
1980 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
1981 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
1982 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
1983 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
1984 mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1985 }
1986
1987 static void
1988 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
1989 {
1990 uint32_t flags;
1991 unsigned int lun;
1992 struct fc_port *fcport = sp->fcport;
1993 scsi_qla_host_t *vha = fcport->vha;
1994 struct qla_hw_data *ha = vha->hw;
1995 struct srb_ctx *ctx = sp->ctx;
1996 struct srb_iocb *iocb = ctx->u.iocb_cmd;
1997 struct req_que *req = vha->req;
1998
1999 flags = iocb->u.tmf.flags;
2000 lun = iocb->u.tmf.lun;
2001
2002 tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2003 tsk->entry_count = 1;
2004 tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2005 tsk->nport_handle = cpu_to_le16(fcport->loop_id);
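/*
 * Task-management timeout is 2 * R_A_TOV; the divide by 10
 * presumably converts r_a_tov from 100 ms units to the seconds
 * the firmware expects in this field.
 */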
2006 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2007 tsk->control_flags = cpu_to_le32(flags);
2008 tsk->port_id[0] = fcport->d_id.b.al_pa;
2009 tsk->port_id[1] = fcport->d_id.b.area;
2010 tsk->port_id[2] = fcport->d_id.b.domain;
2011 tsk->vp_index = fcport->vp_idx;
2012
2013 if (flags == TCF_LUN_RESET) {
2014 int_to_scsilun(lun, &tsk->lun);
2015 host_to_fcp_swap((uint8_t *)&tsk->lun,
2016 sizeof(tsk->lun));
2017 }
2018 }
2019
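/**
 * qla24xx_els_iocb() - Build an ELS pass-through IOCB from a bsg_job.
 * @sp: SRB wrapping the bsg_job
 * @els_iocb: IOCB packet to populate
 *
 * The ELS opcode comes from the rport request for SRB_ELS_CMD_RPT and
 * from the host request otherwise; the first mapped segment of each of
 * the transmit and receive payloads is referenced directly in the packet.
 */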
2020 static void
2021 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2022 {
2023 struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
2024
2025 els_iocb->entry_type = ELS_IOCB_TYPE;
2026 els_iocb->entry_count = 1;
2027 els_iocb->sys_define = 0;
2028 els_iocb->entry_status = 0;
2029 els_iocb->handle = sp->handle;
2030 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2031 els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2032 els_iocb->vp_index = sp->fcport->vp_idx;
2033 els_iocb->sof_type = EST_SOFI3;
2034 els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2035
2036 els_iocb->opcode =
2037 (((struct srb_ctx *)sp->ctx)->type == SRB_ELS_CMD_RPT) ?
2038 bsg_job->request->rqst_data.r_els.els_code :
2039 bsg_job->request->rqst_data.h_els.command_code;
2040 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2041 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2042 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2043 els_iocb->control_flags = 0;
2044 els_iocb->rx_byte_count =
2045 cpu_to_le32(bsg_job->reply_payload.payload_len);
2046 els_iocb->tx_byte_count =
2047 cpu_to_le32(bsg_job->request_payload.payload_len);
2048
2049 els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2050 (bsg_job->request_payload.sg_list)));
2051 els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2052 (bsg_job->request_payload.sg_list)));
2053 els_iocb->tx_len = cpu_to_le32(sg_dma_len
2054 (bsg_job->request_payload.sg_list));
2055
2056 els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2057 (bsg_job->reply_payload.sg_list)));
2058 els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2059 (bsg_job->reply_payload.sg_list)));
2060 els_iocb->rx_len = cpu_to_le32(sg_dma_len
2061 (bsg_job->reply_payload.sg_list));
2062 }
2063
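/**
 * qla2x00_ct_iocb() - Build a CT pass-through IOCB for legacy ISPs.
 * @sp: SRB wrapping the bsg_job
 * @ct_iocb: MS IOCB packet to populate
 *
 * The request payload is referenced through the embedded request
 * segment; reply segments beyond the first spill into Continuation
 * Type 1 IOCBs, five DSDs per continuation.
 */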
2064 static void
2065 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2066 {
2067 uint16_t avail_dsds;
2068 uint32_t *cur_dsd;
2069 struct scatterlist *sg;
2070 int index;
2071 uint16_t tot_dsds;
2072 scsi_qla_host_t *vha = sp->fcport->vha;
2073 struct qla_hw_data *ha = vha->hw;
2074 struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
2075 int loop_iteration = 0;
2076 int cont_iocb_prsnt = 0;
2077 int entry_count = 1;
2078
2079 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2080 ct_iocb->entry_type = CT_IOCB_TYPE;
2081 ct_iocb->entry_status = 0;
2082 ct_iocb->handle1 = sp->handle;
2083 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2084 ct_iocb->status = __constant_cpu_to_le16(0);
2085 ct_iocb->control_flags = __constant_cpu_to_le16(0);
2086 ct_iocb->timeout = 0;
2087 ct_iocb->cmd_dsd_count =
2088 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2089 ct_iocb->total_dsd_count =
2090 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2091 ct_iocb->req_bytecount =
2092 cpu_to_le32(bsg_job->request_payload.payload_len);
2093 ct_iocb->rsp_bytecount =
2094 cpu_to_le32(bsg_job->reply_payload.payload_len);
2095
2096 ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2097 (bsg_job->request_payload.sg_list)));
2098 ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2099 (bsg_job->request_payload.sg_list)));
2100 ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2101
2102 ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2103 (bsg_job->reply_payload.sg_list)));
2104 ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2105 (bsg_job->reply_payload.sg_list)));
2106 ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2107
2108 avail_dsds = 1;
2109 cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2110 index = 0;
2111 tot_dsds = bsg_job->reply_payload.sg_cnt;
2112
2113 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2114 dma_addr_t sle_dma;
2115 cont_a64_entry_t *cont_pkt;
2116
2117 /* Allocate additional continuation packets? */
2118 if (avail_dsds == 0) {
2119 /*
2120 * Five DSDs are available in the Cont.
2121 * Type 1 IOCB.
2122 */
2123 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2124 vha->hw->req_q_map[0]);
2125 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2126 avail_dsds = 5;
2127 cont_iocb_prsnt = 1;
2128 entry_count++;
2129 }
2130
2131 sle_dma = sg_dma_address(sg);
2132 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2133 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2134 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2135 loop_iteration++;
2136 avail_dsds--;
2137 }
2138 ct_iocb->entry_count = entry_count;
2139 }
2140
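/**
 * qla24xx_ct_iocb() - Build a CT pass-through IOCB for FWI2 ISPs.
 * @sp: SRB wrapping the bsg_job
 * @ct_iocb: CT entry to populate
 *
 * Like the legacy variant, but addressed by N_Port handle and VP index,
 * with the command payload in dseg_0 and reply segments chained through
 * Continuation Type 1 IOCBs.
 */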
2141 static void
2142 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2143 {
2144 uint16_t avail_dsds;
2145 uint32_t *cur_dsd;
2146 struct scatterlist *sg;
2147 int index;
2148 uint16_t tot_dsds;
2149 scsi_qla_host_t *vha = sp->fcport->vha;
2150 struct qla_hw_data *ha = vha->hw;
2151 struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
2152 int loop_iteration = 0;
2153 int cont_iocb_prsnt = 0;
2154 int entry_count = 1;
2155
2156 ct_iocb->entry_type = CT_IOCB_TYPE;
2157 ct_iocb->entry_status = 0;
2158 ct_iocb->sys_define = 0;
2159 ct_iocb->handle = sp->handle;
2160
2161 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2162 ct_iocb->vp_index = sp->fcport->vp_idx;
2163 ct_iocb->comp_status = __constant_cpu_to_le16(0);
2164
2165 ct_iocb->cmd_dsd_count =
2166 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2167 ct_iocb->timeout = 0;
2168 ct_iocb->rsp_dsd_count =
2169 __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2170 ct_iocb->rsp_byte_count =
2171 cpu_to_le32(bsg_job->reply_payload.payload_len);
2172 ct_iocb->cmd_byte_count =
2173 cpu_to_le32(bsg_job->request_payload.payload_len);
2174 ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
2175 (bsg_job->request_payload.sg_list)));
2176 ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
2177 (bsg_job->request_payload.sg_list)));
2178 ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
2179 (bsg_job->request_payload.sg_list));
2180
2181 avail_dsds = 1;
2182 cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
2183 index = 0;
2184 tot_dsds = bsg_job->reply_payload.sg_cnt;
2185
2186 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2187 dma_addr_t sle_dma;
2188 cont_a64_entry_t *cont_pkt;
2189
2190 /* Allocate additional continuation packets? */
2191 if (avail_dsds == 0) {
2192 /*
2193 * Five DSDs are available in the Cont.
2194 * Type 1 IOCB.
2195 */
2196 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2197 ha->req_q_map[0]);
2198 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2199 avail_dsds = 5;
2200 cont_iocb_prsnt = 1;
2201 entry_count++;
2202 }
2203
2204 sle_dma = sg_dma_address(sg);
2205 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2206 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2207 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2208 loop_iteration++;
2209 avail_dsds--;
2210 }
2211 ct_iocb->entry_count = entry_count;
2212 }
2213
2214 /**
2215 * qla82xx_start_scsi() - Send a SCSI command to the ISP
2216 * @sp: command to send to the ISP
2217 *
2218 * Returns non-zero if a failure occurred, else zero.
2219 */
2220 int
2221 qla82xx_start_scsi(srb_t *sp)
2222 {
2223 int ret, nseg;
2224 unsigned long flags;
2225 struct scsi_cmnd *cmd;
2226 uint32_t *clr_ptr;
2227 uint32_t index;
2228 uint32_t handle;
2229 uint16_t cnt;
2230 uint16_t req_cnt;
2231 uint16_t tot_dsds;
2232 struct device_reg_82xx __iomem *reg;
2233 uint32_t dbval;
2234 uint32_t *fcp_dl;
2235 uint8_t additional_cdb_len;
2236 struct ct6_dsd *ctx;
2237 struct scsi_qla_host *vha = sp->fcport->vha;
2238 struct qla_hw_data *ha = vha->hw;
2239 struct req_que *req = NULL;
2240 struct rsp_que *rsp = NULL;
2241 char tag[2];
2242
2243 /* Setup device pointers. */
2244 ret = 0;
2245 reg = &ha->iobase->isp82;
2246 cmd = sp->cmd;
2247 req = vha->req;
2248 rsp = ha->rsp_q_map[0];
2249
2250 /* So we know we haven't pci_map'ed anything yet */
2251 tot_dsds = 0;
2252
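/*
 * Base doorbell value: the port number is shifted to bit 5; the
 * request-queue id and ring index are merged in at bits 8 and 16
 * when the doorbell is rung below.
 */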
2253 dbval = 0x04 | (ha->portnum << 5);
2254
2255 /* Send marker if required */
2256 if (vha->marker_needed != 0) {
2257 if (qla2x00_marker(vha, req,
2258 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2259 ql_log(ql_log_warn, vha, 0x300c,
2260 "qla2x00_marker failed for cmd=%p.\n", cmd);
2261 return QLA_FUNCTION_FAILED;
2262 }
2263 vha->marker_needed = 0;
2264 }
2265
2266 /* Acquire ring specific lock */
2267 spin_lock_irqsave(&ha->hardware_lock, flags);
2268
2269 /* Check for room in outstanding command list. */
2270 handle = req->current_outstanding_cmd;
2271 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
2272 handle++;
2273 if (handle == MAX_OUTSTANDING_COMMANDS)
2274 handle = 1;
2275 if (!req->outstanding_cmds[handle])
2276 break;
2277 }
2278 if (index == MAX_OUTSTANDING_COMMANDS)
2279 goto queuing_error;
2280
2281 /* Map the sg table so we have an accurate count of sg entries needed */
2282 if (scsi_sg_count(cmd)) {
2283 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2284 scsi_sg_count(cmd), cmd->sc_data_direction);
2285 if (unlikely(!nseg))
2286 goto queuing_error;
2287 } else
2288 nseg = 0;
2289
2290 tot_dsds = nseg;
2291
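/*
 * Commands with more data segments than ql2xshiftctondsd are sent as
 * Command Type 6 IOCBs, which carry the FCP_CMND IU and DSD lists in
 * driver-managed DMA memory; smaller commands use the inline Command
 * Type 7 format.
 */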
2292 if (tot_dsds > ql2xshiftctondsd) {
2293 struct cmd_type_6 *cmd_pkt;
2294 uint16_t more_dsd_lists = 0;
2295 struct dsd_dma *dsd_ptr;
2296 uint16_t i;
2297
2298 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
2299 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
2300 ql_dbg(ql_dbg_io, vha, 0x300d,
2301 "Num of DSD list %d is than %d for cmd=%p.\n",
2302 more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
2303 cmd);
2304 goto queuing_error;
2305 }
2306
2307 if (more_dsd_lists <= ha->gbl_dsd_avail)
2308 goto sufficient_dsds;
2309 else
2310 more_dsd_lists -= ha->gbl_dsd_avail;
2311
2312 for (i = 0; i < more_dsd_lists; i++) {
2313 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2314 if (!dsd_ptr) {
2315 ql_log(ql_log_fatal, vha, 0x300e,
2316 "Failed to allocate memory for dsd_dma "
2317 "for cmd=%p.\n", cmd);
2318 goto queuing_error;
2319 }
2320
2321 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2322 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2323 if (!dsd_ptr->dsd_addr) {
2324 kfree(dsd_ptr);
2325 ql_log(ql_log_fatal, vha, 0x300f,
2326 "Failed to allocate memory for dsd_addr "
2327 "for cmd=%p.\n", cmd);
2328 goto queuing_error;
2329 }
2330 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2331 ha->gbl_dsd_avail++;
2332 }
2333
2334 sufficient_dsds:
2335 req_cnt = 1;
2336
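/* Require two spare entries beyond req_cnt, presumably so a full ring is never mistaken for an empty one. */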
2337 if (req->cnt < (req_cnt + 2)) {
2338 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2339 &reg->req_q_out[0]);
2340 if (req->ring_index < cnt)
2341 req->cnt = cnt - req->ring_index;
2342 else
2343 req->cnt = req->length -
2344 (req->ring_index - cnt);
2345 }
2346
2347 if (req->cnt < (req_cnt + 2))
2348 goto queuing_error;
2349
2350 ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2351 if (!sp->ctx) {
2352 ql_log(ql_log_fatal, vha, 0x3010,
2353 "Failed to allocate ctx for cmd=%p.\n", cmd);
2354 goto queuing_error;
2355 }
2356 memset(ctx, 0, sizeof(struct ct6_dsd));
2357 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2358 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2359 if (!ctx->fcp_cmnd) {
2360 ql_log(ql_log_fatal, vha, 0x3011,
2361 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2362 goto queuing_error_fcp_cmnd;
2363 }
2364
2365 /* Initialize the DSD list and dma handle */
2366 INIT_LIST_HEAD(&ctx->dsd_list);
2367 ctx->dsd_use_cnt = 0;
2368
2369 if (cmd->cmd_len > 16) {
2370 additional_cdb_len = cmd->cmd_len - 16;
2371 if ((cmd->cmd_len % 4) != 0) {
2372 /* SCSI commands longer than 16 bytes must be
2373 * a multiple of 4 bytes in length.
2374 */
2375 ql_log(ql_log_warn, vha, 0x3012,
2376 "scsi cmd len %d not multiple of 4 "
2377 "for cmd=%p.\n", cmd->cmd_len, cmd);
2378 goto queuing_error_fcp_cmnd;
2379 }
2380 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2381 } else {
2382 additional_cdb_len = 0;
2383 ctx->fcp_cmnd_len = 12 + 16 + 4;
2384 }
2385
2386 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2387 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2388
2389 /* Zero out remaining portion of packet. */
2390 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
2391 clr_ptr = (uint32_t *)cmd_pkt + 2;
2392 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2393 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2394
2395 /* Set NPORT-ID and LUN number*/
2396 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2397 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2398 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2399 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2400 cmd_pkt->vp_index = sp->fcport->vp_idx;
2401
2402 /* Build IOCB segments */
2403 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2404 goto queuing_error_fcp_cmnd;
2405
2406 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
2407 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2408
2409 /* build FCP_CMND IU */
2410 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2411 int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
2412 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2413
2414 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2415 ctx->fcp_cmnd->additional_cdb_len |= 1;
2416 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2417 ctx->fcp_cmnd->additional_cdb_len |= 2;
2418
2419 /*
2420 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2421 */
2422 if (scsi_populate_tag_msg(cmd, tag)) {
2423 switch (tag[0]) {
2424 case HEAD_OF_QUEUE_TAG:
2425 ctx->fcp_cmnd->task_attribute =
2426 TSK_HEAD_OF_QUEUE;
2427 break;
2428 case ORDERED_QUEUE_TAG:
2429 ctx->fcp_cmnd->task_attribute =
2430 TSK_ORDERED;
2431 break;
2432 }
2433 }
2434
2435 /* Populate the FCP_PRIO. */
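/* The priority lands at bits 6:3 of the task-attribute byte, above the 3-bit task attribute itself (per FCP-4's command-priority field). */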
2436 if (ha->flags.fcp_prio_enabled)
2437 ctx->fcp_cmnd->task_attribute |=
2438 sp->fcport->fcp_prio << 3;
2439
2440 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2441
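/* FCP_DL (expected data length) follows the CDB in the FCP_CMND IU and is big-endian on the wire, hence htonl(). */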
2442 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2443 additional_cdb_len);
2444 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2445
2446 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2447 cmd_pkt->fcp_cmnd_dseg_address[0] =
2448 cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2449 cmd_pkt->fcp_cmnd_dseg_address[1] =
2450 cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2451
2452 sp->flags |= SRB_FCP_CMND_DMA_VALID;
2453 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2454 /* Set total IOCB entry count. */
2455 cmd_pkt->entry_count = (uint8_t)req_cnt;
2456 /* Specify response queue number where
2457 * completion should happen.
2458 */
2459 cmd_pkt->entry_status = (uint8_t) rsp->id;
2460 } else {
2461 struct cmd_type_7 *cmd_pkt;
2462 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2463 if (req->cnt < (req_cnt + 2)) {
2464 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2465 &reg->req_q_out[0]);
2466 if (req->ring_index < cnt)
2467 req->cnt = cnt - req->ring_index;
2468 else
2469 req->cnt = req->length -
2470 (req->ring_index - cnt);
2471 }
2472 if (req->cnt < (req_cnt + 2))
2473 goto queuing_error;
2474
2475 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2476 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2477
2478 /* Zero out remaining portion of packet. */
2479 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2480 clr_ptr = (uint32_t *)cmd_pkt + 2;
2481 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2482 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2483
2484 /* Set NPORT-ID and LUN number*/
2485 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2486 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2487 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2488 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2489 cmd_pkt->vp_index = sp->fcport->vp_idx;
2490
2491 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
2492 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
2493 sizeof(cmd_pkt->lun));
2494
2495 /*
2496 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2497 */
2498 if (scsi_populate_tag_msg(cmd, tag)) {
2499 switch (tag[0]) {
2500 case HEAD_OF_QUEUE_TAG:
2501 cmd_pkt->task = TSK_HEAD_OF_QUEUE;
2502 break;
2503 case ORDERED_QUEUE_TAG:
2504 cmd_pkt->task = TSK_ORDERED;
2505 break;
2506 }
2507 }
2508
2509 /* Populate the FCP_PRIO. */
2510 if (ha->flags.fcp_prio_enabled)
2511 cmd_pkt->task |= sp->fcport->fcp_prio << 3;
2512
2513 /* Load SCSI command packet. */
2514 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2515 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2516
2517 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2518
2519 /* Build IOCB segments */
2520 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
2521
2522 /* Set total IOCB entry count. */
2523 cmd_pkt->entry_count = (uint8_t)req_cnt;
2524 /* Specify response queue number where
2525 * completion should happen.
2526 */
2527 cmd_pkt->entry_status = (uint8_t) rsp->id;
2528
2529 }
2530 /* Record the command as outstanding. */
2531 req->current_outstanding_cmd = handle;
2532 req->outstanding_cmds[handle] = sp;
2533 sp->handle = handle;
2534 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2535 req->cnt -= req_cnt;
2536 wmb();
2537
2538 /* Adjust ring index. */
2539 req->ring_index++;
2540 if (req->ring_index == req->length) {
2541 req->ring_index = 0;
2542 req->ring_ptr = req->ring;
2543 } else
2544 req->ring_ptr++;
2545
2546 sp->flags |= SRB_DMA_VALID;
2547
2548 /* Set chip new ring index: ring the request doorbell. */
2549 /* Write, then read back and verify until the chip echoes dbval. */
2550 dbval = dbval | (req->id << 8) | (req->ring_index << 16);
2551 if (ql2xdbwr)
2552 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
2553 else {
2554 WRT_REG_DWORD(
2555 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2556 dbval);
2557 wmb();
2558 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
2559 WRT_REG_DWORD(
2560 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2561 dbval);
2562 wmb();
2563 }
2564 }
2565
2566 /* Manage unprocessed RIO/ZIO commands in response queue. */
2567 if (vha->flags.process_response_queue &&
2568 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2569 qla24xx_process_response_queue(vha, rsp);
2570
2571 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2572 return QLA_SUCCESS;
2573
2574 queuing_error_fcp_cmnd:
2575 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
2576 queuing_error:
2577 if (tot_dsds)
2578 scsi_dma_unmap(cmd);
2579
2580 if (sp->ctx) {
2581 mempool_free(sp->ctx, ha->ctx_mempool);
2582 sp->ctx = NULL;
2583 }
2584 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2585
2586 return QLA_FUNCTION_FAILED;
2587 }
2588
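/**
 * qla2x00_start_sp() - Build and issue an SRB-based IOCB.
 * @sp: SRB describing the request
 *
 * Allocates ring space under the hardware lock, dispatches to the
 * FWI2 or legacy IOCB builder based on the SRB context type, and
 * starts the request queue.
 */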
2589 int
2590 qla2x00_start_sp(srb_t *sp)
2591 {
2592 int rval;
2593 struct qla_hw_data *ha = sp->fcport->vha->hw;
2594 void *pkt;
2595 struct srb_ctx *ctx = sp->ctx;
2596 unsigned long flags;
2597
2598 rval = QLA_FUNCTION_FAILED;
2599 spin_lock_irqsave(&ha->hardware_lock, flags);
2600 pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
2601 if (!pkt) {
2602 ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
2603 "qla2x00_alloc_iocbs failed.\n");
2604 goto done;
2605 }
2606
2607 rval = QLA_SUCCESS;
2608 switch (ctx->type) {
2609 case SRB_LOGIN_CMD:
2610 IS_FWI2_CAPABLE(ha) ?
2611 qla24xx_login_iocb(sp, pkt) :
2612 qla2x00_login_iocb(sp, pkt);
2613 break;
2614 case SRB_LOGOUT_CMD:
2615 IS_FWI2_CAPABLE(ha) ?
2616 qla24xx_logout_iocb(sp, pkt) :
2617 qla2x00_logout_iocb(sp, pkt);
2618 break;
2619 case SRB_ELS_CMD_RPT:
2620 case SRB_ELS_CMD_HST:
2621 qla24xx_els_iocb(sp, pkt);
2622 break;
2623 case SRB_CT_CMD:
2624 IS_FWI2_CAPABLE(ha) ?
2625 qla24xx_ct_iocb(sp, pkt) :
2626 qla2x00_ct_iocb(sp, pkt);
2627 break;
2628 case SRB_ADISC_CMD:
2629 IS_FWI2_CAPABLE(ha) ?
2630 qla24xx_adisc_iocb(sp, pkt) :
2631 qla2x00_adisc_iocb(sp, pkt);
2632 break;
2633 case SRB_TM_CMD:
2634 qla24xx_tm_iocb(sp, pkt);
2635 break;
2636 default:
2637 break;
2638 }
2639
2640 wmb();
2641 qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
2642 done:
2643 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2644 return rval;
2645 }