[SCSI] lpfc 8.3.39: Reduced spinlock contention on SCSI buffer list
drivers/scsi/lpfc/lpfc_scsi.c
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

int _dump_buf_done = 1;

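/*
 * Human-readable names for the DIF protection operations, indexed by
 * scsi_get_prot_op(); the order mirrors the SCSI_PROT_* enum values.
 */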
static char *dif_op_str[] = {
	"PROT_NORMAL",
	"PROT_READ_INSERT",
	"PROT_WRITE_STRIP",
	"PROT_READ_STRIP",
	"PROT_WRITE_INSERT",
	"PROT_READ_PASS",
	"PROT_WRITE_PASS",
};

struct scsi_dif_tuple {
	__be16 guard_tag;	/* Checksum */
	__be16 app_tag;		/* Opaque storage */
	__be32 ref_tag;		/* Target LBA or indirect LBA */
};

static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);

static void
lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_sglist(cmnd);

	if (!_dump_buf_data) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
				__func__);
		return;
	}

	if (!sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9051 BLKGRD: ERROR: data scatterlist is null\n");
		return;
	}

	dst = (void *) _dump_buf_data;
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;
		sgde = sg_next(sgde);
	}
}

static void
lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_prot_sglist(cmnd);

	if (!_dump_buf_dif) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9052 BLKGRD: ERROR %s _dump_buf_dif is NULL\n",
				__func__);
		return;
	}

	if (!sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9053 BLKGRD: ERROR: prot scatterlist is null\n");
		return;
	}

	dst = _dump_buf_dif;
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;
		sgde = sg_next(sgde);
	}
}

/**
 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called from the lpfc_prep_task_mgmt_cmd function to
 * set the last bit in the response sge entry.
 **/
static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
			   struct lpfc_scsi_buf *lpfc_cmd)
{
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
	if (sgl) {
		sgl += 1;
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}
}

/**
 * lpfc_update_stats - Update statistical data for the command completion
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called when there is a command completion and this
 * function updates the statistical data for the command completion.
 **/
static void
lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long flags;
	struct Scsi_Host *shost = cmd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	unsigned long latency;
	int i;

	if (cmd->result)
		return;

	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);

	spin_lock_irqsave(shost->host_lock, flags);
	if (!vport->stat_data_enabled ||
	    vport->stat_data_blocked ||
	    !pnode ||
	    !pnode->lat_data ||
	    (phba->bucket_type == LPFC_NO_BUCKET)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}

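	/*
	 * Map the measured latency to a histogram bucket.  For linear
	 * buckets the index is (latency - bucket_base) / bucket_step,
	 * rounded up; e.g. with bucket_base 0 and bucket_step 10 ms, a
	 * 25 ms completion lands in bucket (25 + 9) / 10 = 3.  Otherwise
	 * bucket i covers latencies up to bucket_base + 2^i * bucket_step.
	 */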
	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
			phba->bucket_step;
		/* check array subscript bounds */
		if (i < 0)
			i = 0;
		else if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;
	} else {
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
			if (latency <= (phba->bucket_base +
				((1<<i)*phba->bucket_step)))
				break;
	}

	pnode->lat_data[i].cmd_count++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/**
 * lpfc_send_sdev_queuedepth_change_event - Posts a queuedepth change event
 * @phba: Pointer to HBA context object.
 * @vport: Pointer to vport object.
 * @ndlp: Pointer to FC node associated with the target.
 * @lun: Lun number of the scsi device.
 * @old_val: Old value of the queue depth.
 * @new_val: New value of the queue depth.
 *
 * This function sends an event to the mgmt application indicating
 * there is a change in the scsi device queue depth.
 **/
static void
lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
		struct lpfc_vport *vport,
		struct lpfc_nodelist *ndlp,
		uint32_t lun,
		uint32_t old_val,
		uint32_t new_val)
{
	struct lpfc_fast_path_event *fast_path_evt;
	unsigned long flags;

	fast_path_evt = lpfc_alloc_fast_evt(phba);
	if (!fast_path_evt)
		return;

	fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
		FC_REG_SCSI_EVENT;
	fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
		LPFC_EVENT_VARQUEDEPTH;

	/* Report all luns with change in queue depth */
	fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
			&ndlp->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
			&ndlp->nlp_nodename, sizeof(struct lpfc_name));
	}

	fast_path_evt->un.queue_depth_evt.oldval = old_val;
	fast_path_evt->un.queue_depth_evt.newval = new_val;
	fast_path_evt->vport = vport;

	fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);

	return;
}

/**
 * lpfc_change_queue_depth - Alter scsi device queue depth
 * @sdev: Pointer to the scsi device on which to change the queue depth.
 * @qdepth: New queue depth to set the sdev to.
 * @reason: The reason for the queue depth change.
 *
 * This function is called by the midlayer and the LLD to alter the queue
 * depth for a scsi device. This function sets the queue depth to the new
 * value and sends an event out to log the queue depth change.
 **/
int
lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	unsigned long new_queue_depth, old_queue_depth;

	old_queue_depth = sdev->queue_depth;
	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	new_queue_depth = sdev->queue_depth;
	rdata = sdev->hostdata;
	if (rdata)
		lpfc_send_sdev_queuedepth_change_event(phba, vport,
						       rdata->pnode, sdev->lun,
						       old_queue_depth,
						       new_queue_depth);
	return sdev->queue_depth;
}

/**
 * lpfc_change_queue_type() - Change a device's scsi tag queuing type
 * @sdev: Pointer to the scsi device whose queue depth is to change
 * @tag_type: Identifier for queue tag type
 */
static int
lpfc_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	if (sdev->tagged_supported) {
		scsi_set_tag_type(sdev, tag_type);
		if (tag_type)
			scsi_activate_tcq(sdev, sdev->queue_depth);
		else
			scsi_deactivate_tcq(sdev, sdev->queue_depth);
	} else
		tag_type = 0;

	return tag_type;
}

/**
 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called when there is a resource error in the driver or
 * firmware. It posts a WORKER_RAMP_DOWN_QUEUE event for @phba, at most one
 * event each second, and wakes up the worker thread of @phba to process it.
 *
 * This routine should be called with no lock held.
 **/
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;
	uint32_t evt_posted;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rampup_queue_depth - Post RAMP_UP_QUEUE event for worker thread
 * @vport: The virtual port for which this call is being executed.
 * @queue_depth: The current queue depth of the scsi device.
 *
 * This routine posts a WORKER_RAMP_UP_QUEUE event for the @vport's phba, at
 * most one event every 5 minutes after last_ramp_up_time or
 * last_rsrc_error_time, and wakes up the worker thread to process it.
 *
 * This routine should be called with no lock held.
 **/
static inline void
lpfc_rampup_queue_depth(struct lpfc_vport *vport,
			uint32_t queue_depth)
{
	unsigned long flags;
	struct lpfc_hba *phba = vport->phba;
	uint32_t evt_posted;
	atomic_inc(&phba->num_cmd_success);

	if (vport->cfg_lun_queue_depth <= queue_depth)
		return;
	spin_lock_irqsave(&phba->hbalock, flags);
	if (time_before(jiffies,
			phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) ||
	    time_before(jiffies,
			phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}
	phba->last_ramp_up_time = jiffies;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for the
 * worker thread. It reduces the queue depth for all scsi devices on each
 * vport associated with @phba.
 **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	/*
	 * The error and success command counters are global per
	 * driver instance.  If another handler has already
	 * operated on this error event, just exit.
	 */
	if (num_rsrc_err == 0)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
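				/*
				 * Scale the depth down in proportion to the
				 * observed error rate; e.g. 1 resource error
				 * against 3 successes trims a depth of 32 by
				 * 32 * 1 / 4 = 8, down to 24.  A zero-sized
				 * step still backs off by one.
				 */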
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
								new_queue_depth;
				lpfc_change_queue_depth(sdev, new_queue_depth,
							SCSI_QDEPTH_DEFAULT);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

/**
 * lpfc_ramp_up_queue_handler - WORKER_RAMP_UP_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_UP_QUEUE event for the
 * worker thread. It increases the queue depth of every scsi device on each
 * vport associated with @phba by 1, and resets @phba num_rsrc_err and
 * num_cmd_success to zero.
 **/
void
lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				if (vports[i]->cfg_lun_queue_depth <=
				    sdev->queue_depth)
					continue;
				lpfc_change_queue_depth(sdev,
							sdev->queue_depth+1,
							SCSI_QDEPTH_RAMP_UP);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

/**
 * lpfc_scsi_dev_block - set all scsi hosts to block state
 * @phba: Pointer to HBA context object.
 *
 * This function walks the vport list and sets each SCSI host to block state
 * by invoking the fc_remote_port_delete() routine. This function is invoked
 * with EEH when the device's PCI slot has been permanently disabled.
 **/
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	struct fc_rport *rport;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				rport = starget_to_rport(scsi_target(sdev));
				fc_remote_port_delete(rport);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
 * @vport: The virtual port for which this call being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates a scsi buffer for device with SLI-3 interface spec,
 * the scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O. The non-DMAable buffer region contains information to build
 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
 * and the initial BPL. In addition to allocating memory, the FCP CMND and
 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl;
	uint16_t iotag;
	int bcnt, bpl_size;

	bpl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
			 num_to_alloc, phba->cfg_sg_dma_buf_size,
			 (int)sizeof(struct fcp_cmnd),
			 (int)sizeof(struct fcp_rsp), bpl_size);

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get memory from the pci pool to map the virt space to pci
		 * bus space for an I/O. The DMA buffer includes space for the
		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
		 * necessary to support the sg_tablesize.
		 */
		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
					GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}

		/* Initialize virtual ptrs to dma_buf region. */
		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
					psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
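
		/*
		 * The DMA buffer is laid out, in order, as the FCP CMND,
		 * the FCP RSP, and then the BPL whose first two entries
		 * point back at the CMND and RSP regions:
		 *
		 *   psb->data: [ fcp_cmnd ][ fcp_rsp ][ bpl[0..1] | data BDEs ]
		 */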
		psb->fcp_cmnd = psb->data;
		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
		psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/* Initialize local short-hand pointers. */
		bpl = psb->fcp_bpl;
		pdma_phys_fcp_cmd = psb->dma_handle;
		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
		pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
		 * are sg list bdes. Initialize the first two and leave the
		 * rest for queuecommand.
		 */
		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

		/* Setup the physical region for the FCP RSP */
		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		if ((phba->sli_rev == 3) &&
				!(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
			/* fill in immediate fcp command BDE */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
					unsli3.fcp_ext.icd);
			iocb->un.fcpi64.bdl.addrHigh = 0;
			iocb->ulpBdeCount = 0;
			iocb->ulpLe = 0;
			/* fill in response BDE */
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
							BUFF_TYPE_BDE_64;
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
							sizeof(struct fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrLow =
					putPaddrLow(pdma_phys_fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrHigh =
					putPaddrHigh(pdma_phys_fcp_rsp);
		} else {
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
			iocb->un.fcpi64.bdl.bdeSize =
					(2 * sizeof(struct ulp_bde64));
			iocb->un.fcpi64.bdl.addrLow =
					putPaddrLow(pdma_phys_bpl);
			iocb->un.fcpi64.bdl.addrHigh =
					putPaddrHigh(pdma_phys_bpl);
			iocb->ulpBdeCount = 1;
			iocb->ulpLe = 1;
		}
		iocb->ulpClass = CLASS3;
		psb->status = IOSTAT_SUCCESS;
		/* Put it back into the SCSI buffer list */
		psb->cur_iocbq.context1 = psb;
		lpfc_release_scsi_buf_s3(phba, psb);

	}

	return bcnt;
}

/**
 * lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine is invoked by the vport cleanup for deletions and the cleanup
 * for an ndlp on removal.
 **/
void
lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb, *next_psb;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
				&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
		if (psb->rdata && psb->rdata->pnode
			&& psb->rdata->pnode->vport == vport)
			psb->rdata = NULL;
	}
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * FCP aborted xri.
 **/
void
lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
			  struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_scsi_buf *psb, *next_psb;
	unsigned long iflag = 0;
	struct lpfc_iocbq *iocbq;
	int i;
	struct lpfc_nodelist *ndlp;
	int rrq_empty = 0;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
		&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
		if (psb->cur_iocbq.sli4_xritag == xri) {
			list_del(&psb->list);
			psb->exch_busy = 0;
			psb->status = IOSTAT_SUCCESS;
			spin_unlock(
				&phba->sli4_hba.abts_scsi_buf_list_lock);
			if (psb->rdata && psb->rdata->pnode)
				ndlp = psb->rdata->pnode;
			else
				ndlp = NULL;

			rrq_empty = list_empty(&phba->active_rrq_list);
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			if (ndlp) {
				lpfc_set_rrq_active(phba, ndlp,
					psb->cur_iocbq.sli4_lxritag, rxid, 1);
				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
			}
			lpfc_release_scsi_buf_s4(phba, psb);
			if (rrq_empty)
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
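	/*
	 * The aborted XRI was not waiting on the abts list: fall back to
	 * scanning every outstanding FCP iotag for a matching xri and just
	 * clear its exchange-busy flag.
	 */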
	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
			(iocbq->iocb_flag & LPFC_IO_LIBDFC))
			continue;
		if (iocbq->sli4_xritag != xri)
			continue;
		psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
		psb->exch_busy = 0;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		if (!list_empty(&pring->txq))
			lpfc_worker_wake_up(phba);
		return;

	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_sli4_post_scsi_sgl_list - Post blocks of scsi buffer sgls from a list
 * @phba: pointer to lpfc hba data structure.
 * @post_sblist: pointer to the scsi buffer list.
 * @sb_count: number of scsi buffers on the list.
 *
 * This routine walks a list of scsi buffers that was passed in. It attempts
 * to construct blocks of scsi buffer sgls which contain contiguous xris and
 * uses the non-embedded SGL block post mailbox commands to post them to the
 * port. For a single SCSI buffer sgl with a non-contiguous xri, if any, it
 * shall use the embedded SGL post mailbox command for posting. The
 * @post_sblist passed in must be a local list, thus no lock is needed when
 * manipulating the list.
 *
 * Returns: 0 = failure, non-zero number of successfully posted buffers.
 **/
int
lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
			     struct list_head *post_sblist, int sb_count)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	int status, sgl_size;
	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
	dma_addr_t pdma_phys_bpl1;
	int last_xritag = NO_XRI;
	LIST_HEAD(prep_sblist);
	LIST_HEAD(blck_sblist);
	LIST_HEAD(scsi_sblist);

	/* sanity check */
	if (sb_count <= 0)
		return -EINVAL;

	sgl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

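	/*
	 * Walk the buffers, batching runs of contiguous XRIs.  A run is
	 * flushed to blck_sblist and posted either when an XRI hole is
	 * found (post the run accumulated before the hole) or when it
	 * grows to LPFC_NEMBED_MBOX_SGL_CNT entries, the most one
	 * non-embedded mailbox command can carry.
	 */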
	list_for_each_entry_safe(psb, psb_next, post_sblist, list) {
		list_del_init(&psb->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (psb->cur_iocbq.sli4_xritag != last_xritag + 1)) {
			/* a hole in xri block, form a sgl posting block */
			list_splice_init(&prep_sblist, &blck_sblist);
			post_cnt = block_cnt - 1;
			/* prepare list for next posting block */
			list_add_tail(&psb->list, &prep_sblist);
			block_cnt = 1;
		} else {
			/* prepare list for next posting block */
			list_add_tail(&psb->list, &prep_sblist);
			/* enough sgls for non-embed sgl mbox command */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_sblist, &blck_sblist);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posting++;
		last_xritag = psb->cur_iocbq.sli4_xritag;

		/* end of repost sgl list condition for SCSI buffers */
		if (num_posting == sb_count) {
			if (post_cnt == 0) {
				/* last sgl posting block */
				list_splice_init(&prep_sblist, &blck_sblist);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				/* last single sgl with non-contiguous xri */
				if (sgl_size > SGL_PAGE_SIZE)
					pdma_phys_bpl1 = psb->dma_phys_bpl +
								SGL_PAGE_SIZE;
				else
					pdma_phys_bpl1 = 0;
				status = lpfc_sli4_post_sgl(phba,
						psb->dma_phys_bpl,
						pdma_phys_bpl1,
						psb->cur_iocbq.sli4_xritag);
				if (status) {
					/* failure, put on abort scsi list */
					psb->exch_busy = 1;
				} else {
					/* success, put on SCSI buffer list */
					psb->exch_busy = 0;
					psb->status = IOSTAT_SUCCESS;
					num_posted++;
				}
				/* success, put on SCSI buffer sgl list */
				list_add_tail(&psb->list, &scsi_sblist);
			}
		}

		/* continue until a nembed page worth of sgls */
		if (post_cnt == 0)
			continue;

		/* post block of SCSI buffer list sgls */
		status = lpfc_sli4_post_scsi_sgl_block(phba, &blck_sblist,
						       post_cnt);

		/* don't reset xritag due to hole in xri block */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* reset SCSI buffer post count for next round of posting */
		post_cnt = 0;

		/* put posted SCSI buffer-sgls on the SCSI buffer sgl list */
		while (!list_empty(&blck_sblist)) {
			list_remove_head(&blck_sblist, psb,
					 struct lpfc_scsi_buf, list);
			if (status) {
				/* failure, put on abort scsi list */
				psb->exch_busy = 1;
			} else {
				/* success, put on SCSI buffer list */
				psb->exch_busy = 0;
				psb->status = IOSTAT_SUCCESS;
				num_posted++;
			}
			list_add_tail(&psb->list, &scsi_sblist);
		}
	}
	/* Push SCSI buffers with sgl posted to the available list */
	while (!list_empty(&scsi_sblist)) {
		list_remove_head(&scsi_sblist, psb,
				 struct lpfc_scsi_buf, list);
		lpfc_release_scsi_buf_s4(phba, psb);
	}
	return num_posted;
}

/**
 * lpfc_sli4_repost_scsi_sgl_list - Repost all the allocated scsi buffer sgls
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine walks the list of scsi buffers that have been allocated and
 * reposts them to the port by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
 * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
 * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
 *
 * Returns: 0 = success, non-zero failure.
 **/
int
lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
{
	LIST_HEAD(post_sblist);
	int num_posted, rc = 0;

	/* get all SCSI buffers that need to repost to a local list */
	spin_lock_irq(&phba->scsi_buf_list_get_lock);
	spin_lock_irq(&phba->scsi_buf_list_put_lock);
	list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist);
	list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist);
	spin_unlock_irq(&phba->scsi_buf_list_put_lock);
	spin_unlock_irq(&phba->scsi_buf_list_get_lock);

	/* post the list of scsi buffer sgls to port if available */
	if (!list_empty(&post_sblist)) {
		num_posted = lpfc_sli4_post_scsi_sgl_list(phba, &post_sblist,
						phba->sli4_hba.scsi_xri_cnt);
		/* failed to post any scsi buffer, return error */
		if (num_posted == 0)
			rc = -EIO;
	}
	return rc;
}

/**
 * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
 * @vport: The virtual port for which this call being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates scsi buffers for device with SLI-4 interface spec,
 * the scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O. After allocating up to @num_to_alloc SCSI buffers and putting
 * them on a list, it posts them to the port by using SGL block post.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated and posted.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct sli4_sge *sgl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl;
	uint16_t iotag, lxri = 0;
	int bcnt, num_posted, sgl_size;
	LIST_HEAD(prep_sblist);
	LIST_HEAD(post_sblist);
	LIST_HEAD(scsi_sblist);

	sgl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "9068 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
			 num_to_alloc, phba->cfg_sg_dma_buf_size, sgl_size,
			 (int)sizeof(struct fcp_cmnd),
			 (int)sizeof(struct fcp_rsp));

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;
		/*
		 * Get memory from the pci pool to map the virt space to
		 * pci bus space for an I/O. The DMA buffer includes space
		 * for the struct fcp_cmnd, struct fcp_rsp and the number
		 * of bde's necessary to support the sg_tablesize.
		 */
		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
						GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}
		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

		/* Page alignment is CRITICAL, double check to be sure */
		if (((unsigned long)(psb->data) &
		    (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
				      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
				psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}

		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
			      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.sli4_lxritag = lxri;
		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
		psb->fcp_bpl = psb->data;
		psb->fcp_cmnd = (psb->data + sgl_size);
		psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
					sizeof(struct fcp_cmnd));

		/* Initialize local short-hand pointers. */
		sgl = (struct sli4_sge *)psb->fcp_bpl;
		pdma_phys_bpl = psb->dma_handle;
		pdma_phys_fcp_cmd = (psb->dma_handle + sgl_size);
		pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP.
		 * The balance are sg list bdes. Initialize the
		 * first two and leave the rest for queuecommand.
		 */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
		sgl++;

		/* Setup the physical region for the FCP RSP */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
		/* setting the BLP size to 2 * sizeof BDE may not be correct.
		 * We are setting the bpl to point to our sgl. An sgl's
		 * entries are 16 bytes, a bpl's entries are 12 bytes.
		 */
		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
		iocb->ulpBdeCount = 1;
		iocb->ulpLe = 1;
		iocb->ulpClass = CLASS3;
		psb->cur_iocbq.context1 = psb;
		psb->dma_phys_bpl = pdma_phys_bpl;

		/* add the scsi buffer to a post list */
		list_add_tail(&psb->list, &post_sblist);
		spin_lock_irq(&phba->scsi_buf_list_get_lock);
		phba->sli4_hba.scsi_xri_cnt++;
		spin_unlock_irq(&phba->scsi_buf_list_get_lock);
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_BG,
			"3021 Allocate %d out of %d requested new SCSI "
			"buffers\n", bcnt, num_to_alloc);

	/* post the list of scsi buffer sgls to port if available */
	if (!list_empty(&post_sblist))
		num_posted = lpfc_sli4_post_scsi_sgl_list(phba,
							  &post_sblist, bcnt);
	else
		num_posted = 0;

	return num_posted;
}

/**
 * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator
 * @vport: The virtual port for which this call being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine wraps the actual SCSI buffer allocator function pointer from
 * the lpfc_hba struct.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static inline int
lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
{
	return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
}

/**
 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list
 * list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
	unsigned long gflag = 0;
	unsigned long pflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag);
	list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf,
			 list);
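	/*
	 * Commands are taken from the get list and completions are
	 * released onto the put list, each guarded by its own lock; only
	 * when the get list runs dry does the consumer take both locks
	 * and splice the put list over, which is what reduces spinlock
	 * contention between the submission and completion paths.
	 */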
	if (!lpfc_cmd) {
		spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag);
		list_splice(&phba->lpfc_scsi_buf_list_put,
			    &phba->lpfc_scsi_buf_list_get);
		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
		list_remove_head(scsi_buf_list_get, lpfc_cmd,
				 struct lpfc_scsi_buf, list);
		spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag);
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag);
	return lpfc_cmd;
}
/**
 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list
 * list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct lpfc_scsi_buf *lpfc_cmd;
	unsigned long gflag = 0;
	unsigned long pflag = 0;
	int found = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag);
	list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list_get, list) {
		if (lpfc_test_rrq_active(phba, ndlp,
					 lpfc_cmd->cur_iocbq.sli4_lxritag))
			continue;
		list_del(&lpfc_cmd->list);
		found = 1;
		break;
	}
	if (!found) {
		spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag);
		list_splice(&phba->lpfc_scsi_buf_list_put,
			    &phba->lpfc_scsi_buf_list_get);
		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
		spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag);
		list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list_get,
				    list) {
			if (lpfc_test_rrq_active(
				phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag))
				continue;
			list_del(&lpfc_cmd->list);
			found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag);
	if (!found)
		return NULL;
	return lpfc_cmd;
}
/**
 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list
 * list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	return phba->lpfc_get_scsi_buf(phba, ndlp);
}

/**
 * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->nonsg_phys = 0;
	psb->prot_seg_cnt = 0;

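	/*
	 * Completions always release to the put list; the get side only
	 * touches this list when it splices it over, so the two paths
	 * rarely contend on the same lock.
	 */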
	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
	psb->pCmd = NULL;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
}

/**
 * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list. For SLI4 XRI's are tied to the scsi buffer
 * and cannot be reused for at least RA_TOV amount of time if it was
 * aborted.
 **/
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->nonsg_phys = 0;
	psb->prot_seg_cnt = 0;

	if (psb->exch_busy) {
		spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
					iflag);
		psb->pCmd = NULL;
		list_add_tail(&psb->list,
			&phba->sli4_hba.lpfc_abts_scsi_buf_list);
		spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
					iflag);
	} else {
		psb->pCmd = NULL;
		spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
		list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
		spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
	}
}

/**
 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{

	phba->lpfc_release_scsi_buf(phba, psb);
}

/**
 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
 * through sg elements and formats the bdes. This routine also initializes all
 * IOCB fields which are dependent on scsi command request buffer.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither. Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages. They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9064 BLKGRD: %s: Too many sg segments from "
				"dma_map_sg. Config %d, seg_cnt %d\n",
				__func__, phba->cfg_sg_seg_cnt,
				lpfc_cmd->seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment
		 * count during probe that limits the number of sg elements
		 * in any single scsi command. Just run through the seg_cnt
		 * and format the bde's.
		 * When using SLI-3 the driver will try to fit all the BDEs
		 * into the IOCB. If it can't then the BDEs get added to a
		 * BPL as it does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and for SLI-3 the extended bde count is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * The extended IOCB format can only fit 3 BDE or a BPL.
			 * This I/O has more than 3 BDE so the 1st data bde will
			 * be a BPL that is filled in here.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	return 0;
}

static inline unsigned
lpfc_cmd_blksize(struct scsi_cmnd *sc)
{
	return sc->device->sector_size;
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS

/* Return if error injection is detected by Initiator */
#define BG_ERR_INIT	0x1
/* Return if error injection is detected by Target */
#define BG_ERR_TGT	0x2
/* Return if swapping CSUM<-->CRC is required for error injection */
#define BG_ERR_SWAP	0x10
/* Return if disabling Guard/Ref/App checking is required for error injection */
#define BG_ERR_CHECK	0x20
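
/*
 * These flags are OR-ed into a single mask: e.g. a corrupted write
 * returns BG_ERR_TGT | BG_ERR_CHECK so the bad tag is sent on the
 * wire with initiator-side checking disabled, leaving the target to
 * detect it.
 */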

/**
 * lpfc_bg_err_inject - Determine if we should inject an error
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @reftag: (out) BlockGuard reference tag for transmitted data
 * @apptag: (out) BlockGuard application tag for transmitted data
 * @new_guard: (in) Value to replace CRC with if needed
 *
 * Returns BG_ERR_* bit mask or 0 if request ignored
 **/
static int
lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
{
	struct scatterlist *sgpe; /* s/g prot entry */
	struct scatterlist *sgde; /* s/g data entry */
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct scsi_dif_tuple *src = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_rport_data *rdata;
	uint32_t op = scsi_get_prot_op(sc);
	uint32_t blksize;
	uint32_t numblks;
	sector_t lba;
	int rc = 0;
	int blockoff = 0;

	if (op == SCSI_PROT_NORMAL)
		return 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);
	lba = scsi_get_lba(sc);

	/* First check if we need to match the LBA */
	if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
		blksize = lpfc_cmd_blksize(sc);
		numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;

		/* Make sure we have the right LBA if one is specified */
		if ((phba->lpfc_injerr_lba < lba) ||
			(phba->lpfc_injerr_lba >= (lba + numblks)))
			return 0;
		if (sgpe) {
			blockoff = phba->lpfc_injerr_lba - lba;
			numblks = sg_dma_len(sgpe) /
				sizeof(struct scsi_dif_tuple);
			if (numblks < blockoff)
				blockoff = numblks;
		}
	}

	/* Next check if we need to match the remote NPortID or WWPN */
	rdata = sc->device->hostdata;
	if (rdata && rdata->pnode) {
		ndlp = rdata->pnode;

		/* Make sure we have the right NPortID if one is specified */
		if (phba->lpfc_injerr_nportid &&
			(phba->lpfc_injerr_nportid != ndlp->nlp_DID))
			return 0;

		/*
		 * Make sure we have the right WWPN if one is specified.
		 * wwn[0] should be a non-zero NAA in a good WWPN.
		 */
		if (phba->lpfc_injerr_wwpn.u.wwn[0] &&
			(memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
				sizeof(struct lpfc_name)) != 0))
			return 0;
	}

	/* Setup a ptr to the protection data if the SCSI host provides it */
	if (sgpe) {
		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		src += blockoff;
		lpfc_cmd = (struct lpfc_scsi_buf *)sc->host_scribble;
	}

	/* Should we change the Reference Tag */
	if (reftag) {
		if (phba->lpfc_injerr_wref_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */

					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9076 BLKGRD: Injecting reftag error: "
					"write lba x%lx + x%x oldrefTag x%x\n",
					(unsigned long)lba, blockoff,
					be32_to_cpu(src->ref_tag));

					/*
					 * Save the old ref_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_REFTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->ref_tag;
					}
					src->ref_tag = cpu_to_be32(0xDEADBEEF);
					phba->lpfc_injerr_wref_cnt--;
					if (phba->lpfc_injerr_wref_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						  0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;

					break;
				}
				/* Drop thru */
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the error
				 * to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEADBEEF will be the reftag on the wire */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9078 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9077 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rref_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_rref_cnt--;
				if (phba->lpfc_injerr_rref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9079 BLKGRD: Injecting reftag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}

1622 /* Should we change the Application Tag */
1623 if (apptag) {
acd6859b
JS
1624 if (phba->lpfc_injerr_wapp_cnt) {
1625 switch (op) {
1626 case SCSI_PROT_WRITE_PASS:
4ac9b226 1627 if (src) {
9a6b09c0
JS
1628 /*
1629 * For WRITE_PASS, force the error
1630 * to be sent on the wire. It should
1631 * be detected by the Target.
1632 * If blockoff != 0 error will be
1633 * inserted in the middle of the IO.
1634 */
1635
acd6859b
JS
1636 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1637 "9080 BLKGRD: Injecting apptag error: "
1638 "write lba x%lx + x%x oldappTag x%x\n",
1639 (unsigned long)lba, blockoff,
9a6b09c0 1640 be16_to_cpu(src->app_tag));
acd6859b
JS
1641
1642 /*
9a6b09c0
JS
1643 * Save the old app_tag so we can
1644 * restore it on completion.
acd6859b 1645 */
9a6b09c0
JS
1646 if (lpfc_cmd) {
1647 lpfc_cmd->prot_data_type =
1648 LPFC_INJERR_APPTAG;
1649 lpfc_cmd->prot_data_segment =
1650 src;
1651 lpfc_cmd->prot_data =
1652 src->app_tag;
1653 }
1654 src->app_tag = cpu_to_be16(0xDEAD);
acd6859b 1655 phba->lpfc_injerr_wapp_cnt--;
4ac9b226
JS
1656 if (phba->lpfc_injerr_wapp_cnt == 0) {
1657 phba->lpfc_injerr_nportid = 0;
1658 phba->lpfc_injerr_lba =
1659 LPFC_INJERR_LBA_OFF;
1660 memset(&phba->lpfc_injerr_wwpn,
1661 0, sizeof(struct lpfc_name));
1662 }
9a6b09c0 1663 rc = BG_ERR_TGT | BG_ERR_CHECK;
acd6859b
JS
1664 break;
1665 }
1666 /* Drop thru */
9a6b09c0 1667 case SCSI_PROT_WRITE_INSERT:
acd6859b 1668 /*
9a6b09c0
JS
1669 * For WRITE_INSERT, force the
1670 * error to be sent on the wire. It should be
1671 * detected by the Target.
acd6859b 1672 */
9a6b09c0 1673 /* DEAD will be the apptag on the wire */
acd6859b
JS
1674 *apptag = 0xDEAD;
1675 phba->lpfc_injerr_wapp_cnt--;
4ac9b226
JS
1676 if (phba->lpfc_injerr_wapp_cnt == 0) {
1677 phba->lpfc_injerr_nportid = 0;
1678 phba->lpfc_injerr_lba =
1679 LPFC_INJERR_LBA_OFF;
1680 memset(&phba->lpfc_injerr_wwpn,
1681 0, sizeof(struct lpfc_name));
1682 }
9a6b09c0 1683 rc = BG_ERR_TGT | BG_ERR_CHECK;
f9bb2da1 1684
acd6859b 1685 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
9a6b09c0 1686 "0813 BLKGRD: Injecting apptag error: "
acd6859b
JS
1687 "write lba x%lx\n", (unsigned long)lba);
1688 break;
9a6b09c0 1689 case SCSI_PROT_WRITE_STRIP:
acd6859b 1690 /*
9a6b09c0
JS
1691 * For WRITE_STRIP and WRITE_PASS,
1692 * force the error on data
1693 * being copied from SLI-Host to SLI-Port.
acd6859b 1694 */
f9bb2da1
JS
1695 *apptag = 0xDEAD;
1696 phba->lpfc_injerr_wapp_cnt--;
4ac9b226
JS
1697 if (phba->lpfc_injerr_wapp_cnt == 0) {
1698 phba->lpfc_injerr_nportid = 0;
1699 phba->lpfc_injerr_lba =
1700 LPFC_INJERR_LBA_OFF;
1701 memset(&phba->lpfc_injerr_wwpn,
1702 0, sizeof(struct lpfc_name));
1703 }
9a6b09c0 1704 rc = BG_ERR_INIT;
f9bb2da1
JS
1705
1706 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
9a6b09c0 1707 "0812 BLKGRD: Injecting apptag error: "
f9bb2da1 1708 "write lba x%lx\n", (unsigned long)lba);
acd6859b 1709 break;
f9bb2da1 1710 }
acd6859b
JS
1711 }
1712 if (phba->lpfc_injerr_rapp_cnt) {
1713 switch (op) {
1714 case SCSI_PROT_READ_INSERT:
acd6859b
JS
1715 case SCSI_PROT_READ_STRIP:
1716 case SCSI_PROT_READ_PASS:
1717 /*
1718 * For READ_STRIP and READ_PASS, force the
1719 * error on data being read off the wire. It
1720 * should force an IO error to the driver.
1721 */
f9bb2da1
JS
1722 *apptag = 0xDEAD;
1723 phba->lpfc_injerr_rapp_cnt--;
4ac9b226
JS
1724 if (phba->lpfc_injerr_rapp_cnt == 0) {
1725 phba->lpfc_injerr_nportid = 0;
1726 phba->lpfc_injerr_lba =
1727 LPFC_INJERR_LBA_OFF;
1728 memset(&phba->lpfc_injerr_wwpn,
1729 0, sizeof(struct lpfc_name));
1730 }
acd6859b 1731 rc = BG_ERR_INIT;
f9bb2da1
JS
1732
1733 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
acd6859b 1734 "0814 BLKGRD: Injecting apptag error: "
f9bb2da1 1735 "read lba x%lx\n", (unsigned long)lba);
acd6859b 1736 break;
f9bb2da1
JS
1737 }
1738 }
1739 }
1740
acd6859b 1741
f9bb2da1 1742 /* Should we change the Guard Tag */
acd6859b
JS
1743 if (new_guard) {
1744 if (phba->lpfc_injerr_wgrd_cnt) {
1745 switch (op) {
1746 case SCSI_PROT_WRITE_PASS:
9a6b09c0 1747 rc = BG_ERR_CHECK;
acd6859b 1748 /* Drop thru */
9a6b09c0
JS
1749
1750 case SCSI_PROT_WRITE_INSERT:
acd6859b 1751 /*
9a6b09c0
JS
1752 * For WRITE_INSERT, force the
1753 * error to be sent on the wire. It should be
1754 * detected by the Target.
acd6859b
JS
1755 */
1756 phba->lpfc_injerr_wgrd_cnt--;
4ac9b226
JS
1757 if (phba->lpfc_injerr_wgrd_cnt == 0) {
1758 phba->lpfc_injerr_nportid = 0;
1759 phba->lpfc_injerr_lba =
1760 LPFC_INJERR_LBA_OFF;
1761 memset(&phba->lpfc_injerr_wwpn,
1762 0, sizeof(struct lpfc_name));
1763 }
f9bb2da1 1764
9a6b09c0 1765 rc |= BG_ERR_TGT | BG_ERR_SWAP;
acd6859b 1766 /* Signals the caller to swap CRC->CSUM */
f9bb2da1 1767
acd6859b 1768 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
9a6b09c0 1769 "0817 BLKGRD: Injecting guard error: "
acd6859b
JS
1770 "write lba x%lx\n", (unsigned long)lba);
1771 break;
9a6b09c0 1772 case SCSI_PROT_WRITE_STRIP:
acd6859b 1773 /*
9a6b09c0
JS
1774 * For WRITE_STRIP and WRITE_PASS,
1775 * force the error on data
1776 * being copied from SLI-Host to SLI-Port.
acd6859b
JS
1777 */
1778 phba->lpfc_injerr_wgrd_cnt--;
4ac9b226
JS
1779 if (phba->lpfc_injerr_wgrd_cnt == 0) {
1780 phba->lpfc_injerr_nportid = 0;
1781 phba->lpfc_injerr_lba =
1782 LPFC_INJERR_LBA_OFF;
1783 memset(&phba->lpfc_injerr_wwpn,
1784 0, sizeof(struct lpfc_name));
1785 }
f9bb2da1 1786
9a6b09c0 1787 rc = BG_ERR_INIT | BG_ERR_SWAP;
acd6859b
JS
1788 /* Signals the caller to swap CRC->CSUM */
1789
1790 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
9a6b09c0 1791 "0816 BLKGRD: Injecting guard error: "
acd6859b
JS
1792 "write lba x%lx\n", (unsigned long)lba);
1793 break;
1794 }
1795 }
1796 if (phba->lpfc_injerr_rgrd_cnt) {
1797 switch (op) {
1798 case SCSI_PROT_READ_INSERT:
acd6859b
JS
1799 case SCSI_PROT_READ_STRIP:
1800 case SCSI_PROT_READ_PASS:
1801 /*
1802 * For READ_STRIP and READ_PASS, force the
1803 * error on data being read off the wire. It
1804 * should force an IO error to the driver.
1805 */
acd6859b 1806 phba->lpfc_injerr_rgrd_cnt--;
4ac9b226
JS
1807 if (phba->lpfc_injerr_rgrd_cnt == 0) {
1808 phba->lpfc_injerr_nportid = 0;
1809 phba->lpfc_injerr_lba =
1810 LPFC_INJERR_LBA_OFF;
1811 memset(&phba->lpfc_injerr_wwpn,
1812 0, sizeof(struct lpfc_name));
1813 }
acd6859b 1814
9a6b09c0 1815 rc = BG_ERR_INIT | BG_ERR_SWAP;
acd6859b
JS
1816 /* Signals the caller to swap CRC->CSUM */
1817
1818 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1819 "0818 BLKGRD: Injecting guard error: "
1820 "read lba x%lx\n", (unsigned long)lba);
1821 }
f9bb2da1
JS
1822 }
1823 }
acd6859b 1824
f9bb2da1
JS
1825 return rc;
1826}
1827#endif
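
/*
 * Note on the return value of lpfc_bg_err_inject(): it is a bitmask,
 * not a boolean. BG_ERR_TGT and BG_ERR_INIT record which side should
 * notice the corruption (target vs. initiator), while BG_ERR_SWAP and
 * BG_ERR_CHECK direct the BPL/SGL setup routines to swap the guard
 * opcodes and to disable checking. The callers below consume it as:
 *
 *	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
 *	if (rc) {
 *		if (rc & BG_ERR_SWAP)
 *			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
 *		if (rc & BG_ERR_CHECK)
 *			checking = 0;
 *	}
 */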
1828
acd6859b
JS
1829/**
1830 * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
1831 * the specified SCSI command.
1832 * @phba: The Hba for which this call is being executed.
6c8eea54
JS
1833 * @sc: The SCSI command to examine
1834 * @txop: (out) BlockGuard operation for transmitted data
1835 * @rxop: (out) BlockGuard operation for received data
1836 *
1837 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1838 *
acd6859b 1839 **/
e2a0a9d6 1840static int
6c8eea54
JS
1841lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1842 uint8_t *txop, uint8_t *rxop)
e2a0a9d6
JS
1843{
1844 uint8_t guard_type = scsi_host_get_guard(sc->device->host);
6c8eea54 1845 uint8_t ret = 0;
e2a0a9d6
JS
1846
1847 if (guard_type == SHOST_DIX_GUARD_IP) {
1848 switch (scsi_get_prot_op(sc)) {
1849 case SCSI_PROT_READ_INSERT:
1850 case SCSI_PROT_WRITE_STRIP:
6c8eea54 1851 *rxop = BG_OP_IN_NODIF_OUT_CSUM;
4ac9b226 1852 *txop = BG_OP_IN_CSUM_OUT_NODIF;
e2a0a9d6
JS
1853 break;
1854
1855 case SCSI_PROT_READ_STRIP:
1856 case SCSI_PROT_WRITE_INSERT:
6c8eea54 1857 *rxop = BG_OP_IN_CRC_OUT_NODIF;
4ac9b226 1858 *txop = BG_OP_IN_NODIF_OUT_CRC;
e2a0a9d6
JS
1859 break;
1860
c6af4042
MP
1861 case SCSI_PROT_READ_PASS:
1862 case SCSI_PROT_WRITE_PASS:
6c8eea54 1863 *rxop = BG_OP_IN_CRC_OUT_CSUM;
4ac9b226 1864 *txop = BG_OP_IN_CSUM_OUT_CRC;
e2a0a9d6
JS
1865 break;
1866
e2a0a9d6
JS
1867 case SCSI_PROT_NORMAL:
1868 default:
6a9c52cf 1869 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7c56b9fd
JS
1870 "9063 BLKGRD: Bad op/guard:%d/IP combination\n",
1871 scsi_get_prot_op(sc));
6c8eea54 1872 ret = 1;
e2a0a9d6
JS
1873 break;
1874
1875 }
7c56b9fd 1876 } else {
e2a0a9d6
JS
1877 switch (scsi_get_prot_op(sc)) {
1878 case SCSI_PROT_READ_STRIP:
1879 case SCSI_PROT_WRITE_INSERT:
6c8eea54 1880 *rxop = BG_OP_IN_CRC_OUT_NODIF;
4ac9b226 1881 *txop = BG_OP_IN_NODIF_OUT_CRC;
e2a0a9d6
JS
1882 break;
1883
1884 case SCSI_PROT_READ_PASS:
1885 case SCSI_PROT_WRITE_PASS:
6c8eea54 1886 *rxop = BG_OP_IN_CRC_OUT_CRC;
4ac9b226 1887 *txop = BG_OP_IN_CRC_OUT_CRC;
e2a0a9d6
JS
1888 break;
1889
e2a0a9d6
JS
1890 case SCSI_PROT_READ_INSERT:
1891 case SCSI_PROT_WRITE_STRIP:
7c56b9fd 1892 *rxop = BG_OP_IN_NODIF_OUT_CRC;
4ac9b226 1893 *txop = BG_OP_IN_CRC_OUT_NODIF;
7c56b9fd
JS
1894 break;
1895
e2a0a9d6
JS
1896 case SCSI_PROT_NORMAL:
1897 default:
6a9c52cf 1898 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7c56b9fd
JS
1899 "9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
1900 scsi_get_prot_op(sc));
6c8eea54 1901 ret = 1;
e2a0a9d6
JS
1902 break;
1903 }
e2a0a9d6
JS
1904 }
1905
6c8eea54 1906 return ret;
e2a0a9d6
JS
1907}
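
/*
 * Example (editor's illustration): for a WRITE_PASS command on a host
 * using SHOST_DIX_GUARD_IP, the routine above selects
 * txop = BG_OP_IN_CSUM_OUT_CRC and rxop = BG_OP_IN_CRC_OUT_CSUM:
 * on transmit the guard arrives from the host as an IP checksum and
 * leaves on the wire as the T10 CRC, and the receive path converts in
 * the opposite direction.
 */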
1908
acd6859b
JS
1909#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1910/**
1911 * lpfc_bg_err_opcodes - Re-determine the BlockGuard opcodes to be used with
1912 * the specified SCSI command in order to force a guard tag error.
1913 * @phba: The Hba for which this call is being executed.
1914 * @sc: The SCSI command to examine
1915 * @txop: (out) BlockGuard operation for transmitted data
1916 * @rxop: (out) BlockGuard operation for received data
1917 *
1918 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1919 *
1920 **/
1921static int
1922lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1923 uint8_t *txop, uint8_t *rxop)
1924{
1925 uint8_t guard_type = scsi_host_get_guard(sc->device->host);
1926 uint8_t ret = 0;
1927
1928 if (guard_type == SHOST_DIX_GUARD_IP) {
1929 switch (scsi_get_prot_op(sc)) {
1930 case SCSI_PROT_READ_INSERT:
1931 case SCSI_PROT_WRITE_STRIP:
acd6859b 1932 *rxop = BG_OP_IN_NODIF_OUT_CRC;
4ac9b226 1933 *txop = BG_OP_IN_CRC_OUT_NODIF;
acd6859b
JS
1934 break;
1935
1936 case SCSI_PROT_READ_STRIP:
1937 case SCSI_PROT_WRITE_INSERT:
acd6859b 1938 *rxop = BG_OP_IN_CSUM_OUT_NODIF;
4ac9b226 1939 *txop = BG_OP_IN_NODIF_OUT_CSUM;
acd6859b
JS
1940 break;
1941
1942 case SCSI_PROT_READ_PASS:
1943 case SCSI_PROT_WRITE_PASS:
4ac9b226 1944 *rxop = BG_OP_IN_CSUM_OUT_CRC;
9a6b09c0 1945 *txop = BG_OP_IN_CRC_OUT_CSUM;
acd6859b
JS
1946 break;
1947
1948 case SCSI_PROT_NORMAL:
1949 default:
1950 break;
1951
1952 }
1953 } else {
1954 switch (scsi_get_prot_op(sc)) {
1955 case SCSI_PROT_READ_STRIP:
1956 case SCSI_PROT_WRITE_INSERT:
acd6859b 1957 *rxop = BG_OP_IN_CSUM_OUT_NODIF;
4ac9b226 1958 *txop = BG_OP_IN_NODIF_OUT_CSUM;
acd6859b
JS
1959 break;
1960
1961 case SCSI_PROT_READ_PASS:
1962 case SCSI_PROT_WRITE_PASS:
4ac9b226 1963 *rxop = BG_OP_IN_CSUM_OUT_CSUM;
9a6b09c0 1964 *txop = BG_OP_IN_CSUM_OUT_CSUM;
acd6859b
JS
1965 break;
1966
1967 case SCSI_PROT_READ_INSERT:
1968 case SCSI_PROT_WRITE_STRIP:
acd6859b 1969 *rxop = BG_OP_IN_NODIF_OUT_CSUM;
4ac9b226 1970 *txop = BG_OP_IN_CSUM_OUT_NODIF;
acd6859b
JS
1971 break;
1972
1973 case SCSI_PROT_NORMAL:
1974 default:
1975 break;
1976 }
1977 }
1978
1979 return ret;
1980}
1981#endif
1982
1983/**
1984 * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
1985 * @phba: The Hba for which this call is being executed.
1986 * @sc: pointer to scsi command we're working on
1987 * @bpl: pointer to buffer list for protection groups
1988 * @datasegcnt: number of segments of data that have been dma mapped
1989 *
1990 * This function sets up BPL buffer list for protection groups of
e2a0a9d6
JS
1991 * type LPFC_PG_TYPE_NO_DIF
1992 *
1993 * This is usually used when the HBA is instructed to generate
1994 * DIFs and insert them into the data stream (or strip DIF from
1995 * the incoming data stream)
1996 *
1997 * The buffer list consists of just one protection group described
1998 * below:
1999 * +-------------------------+
6c8eea54
JS
2000 * start of prot group --> | PDE_5 |
2001 * +-------------------------+
2002 * | PDE_6 |
e2a0a9d6
JS
2003 * +-------------------------+
2004 * | Data BDE |
2005 * +-------------------------+
2006 * |more Data BDE's ... (opt)|
2007 * +-------------------------+
2008 *
e2a0a9d6
JS
2009 *
2010 * Note: Data s/g buffers have been dma mapped
acd6859b
JS
2011 *
2012 * Returns the number of BDEs added to the BPL.
2013 **/
e2a0a9d6
JS
2014static int
2015lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2016 struct ulp_bde64 *bpl, int datasegcnt)
2017{
2018 struct scatterlist *sgde = NULL; /* s/g data entry */
6c8eea54
JS
2019 struct lpfc_pde5 *pde5 = NULL;
2020 struct lpfc_pde6 *pde6 = NULL;
e2a0a9d6 2021 dma_addr_t physaddr;
6c8eea54 2022 int i = 0, num_bde = 0, status;
e2a0a9d6 2023 int datadir = sc->sc_data_direction;
0829a19a 2024#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
acd6859b 2025 uint32_t rc;
0829a19a 2026#endif
acd6859b 2027 uint32_t checking = 1;
e2a0a9d6 2028 uint32_t reftag;
7c56b9fd 2029 unsigned blksize;
6c8eea54 2030 uint8_t txop, rxop;
e2a0a9d6 2031
6c8eea54
JS
2032 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2033 if (status)
e2a0a9d6
JS
2034 goto out;
2035
6c8eea54 2036 /* extract some info from the scsi command for pde*/
e2a0a9d6 2037 blksize = lpfc_cmd_blksize(sc);
acd6859b 2038 reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
e2a0a9d6 2039
f9bb2da1 2040#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4ac9b226 2041 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
acd6859b 2042 if (rc) {
9a6b09c0 2043 if (rc & BG_ERR_SWAP)
acd6859b 2044 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
9a6b09c0 2045 if (rc & BG_ERR_CHECK)
acd6859b
JS
2046 checking = 0;
2047 }
f9bb2da1
JS
2048#endif
2049
6c8eea54
JS
2050 /* setup PDE5 with what we have */
2051 pde5 = (struct lpfc_pde5 *) bpl;
2052 memset(pde5, 0, sizeof(struct lpfc_pde5));
2053 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
6c8eea54 2054
bc73905a 2055 /* Endianness conversion if necessary for PDE5 */
589a52d6 2056 pde5->word0 = cpu_to_le32(pde5->word0);
7c56b9fd 2057 pde5->reftag = cpu_to_le32(reftag);
589a52d6 2058
6c8eea54
JS
2059 /* advance bpl and increment bde count */
2060 num_bde++;
2061 bpl++;
2062 pde6 = (struct lpfc_pde6 *) bpl;
2063
2064 /* setup PDE6 with the rest of the info */
2065 memset(pde6, 0, sizeof(struct lpfc_pde6));
2066 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
2067 bf_set(pde6_optx, pde6, txop);
2068 bf_set(pde6_oprx, pde6, rxop);
2069 if (datadir == DMA_FROM_DEVICE) {
acd6859b
JS
2070 bf_set(pde6_ce, pde6, checking);
2071 bf_set(pde6_re, pde6, checking);
6c8eea54
JS
2072 }
2073 bf_set(pde6_ai, pde6, 1);
7c56b9fd
JS
2074 bf_set(pde6_ae, pde6, 0);
2075 bf_set(pde6_apptagval, pde6, 0);
e2a0a9d6 2076
bc73905a 2077 /* Endianness conversion if necessary for PDE6 */
589a52d6
JS
2078 pde6->word0 = cpu_to_le32(pde6->word0);
2079 pde6->word1 = cpu_to_le32(pde6->word1);
2080 pde6->word2 = cpu_to_le32(pde6->word2);
2081
6c8eea54 2082 /* advance bpl and increment bde count */
e2a0a9d6
JS
2083 num_bde++;
2084 bpl++;
2085
2086 /* assumption: caller has already run dma_map_sg on command data */
2087 scsi_for_each_sg(sc, sgde, datasegcnt, i) {
2088 physaddr = sg_dma_address(sgde);
2089 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
2090 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2091 bpl->tus.f.bdeSize = sg_dma_len(sgde);
2092 if (datadir == DMA_TO_DEVICE)
2093 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2094 else
2095 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2096 bpl->tus.w = le32_to_cpu(bpl->tus.w);
2097 bpl++;
2098 num_bde++;
2099 }
2100
2101out:
2102 return num_bde;
2103}
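
/*
 * Example (editor's illustration): a WRITE_INSERT command whose data
 * maps to two DMA segments yields num_bde = 4 from the routine above:
 * PDE5 (reftag seed), PDE6 (txop/rxop and checking bits) and one data
 * BDE per segment. No protection BDE is built because the HBA
 * generates the DIF trailers itself.
 */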
2104
acd6859b
JS
2105/**
2106 * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
2107 * @phba: The Hba for which this call is being executed.
2108 * @sc: pointer to scsi command we're working on
2109 * @bpl: pointer to buffer list for protection groups
2110 * @datacnt: number of segments of data that have been dma mapped
2111 * @protcnt: number of segments of protection data that have been dma mapped
2112 *
2113 * This function sets up BPL buffer list for protection groups of
2114 * type LPFC_PG_TYPE_DIF
e2a0a9d6
JS
2115 *
2116 * This is usually used when DIFs are in their own buffers,
2117 * separate from the data. The HBA can then be instructed
2118 * to place the DIFs in the outgoing stream. For read operations,
2119 * the HBA could extract the DIFs and place them in DIF buffers.
2120 *
2121 * The buffer list for this type consists of one or more of the
2122 * protection groups described below:
2123 * +-------------------------+
6c8eea54 2124 * start of first prot group --> | PDE_5 |
e2a0a9d6 2125 * +-------------------------+
6c8eea54
JS
2126 * | PDE_6 |
2127 * +-------------------------+
2128 * | PDE_7 (Prot BDE) |
e2a0a9d6
JS
2129 * +-------------------------+
2130 * | Data BDE |
2131 * +-------------------------+
2132 * |more Data BDE's ... (opt)|
2133 * +-------------------------+
6c8eea54 2134 * start of new prot group --> | PDE_5 |
e2a0a9d6
JS
2135 * +-------------------------+
2136 * | ... |
2137 * +-------------------------+
2138 *
e2a0a9d6
JS
2139 * Note: It is assumed that both data and protection s/g buffers have been
2140 * mapped for DMA
acd6859b
JS
2141 *
2142 * Returns the number of BDEs added to the BPL.
2143 **/
e2a0a9d6
JS
2144static int
2145lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2146 struct ulp_bde64 *bpl, int datacnt, int protcnt)
2147{
2148 struct scatterlist *sgde = NULL; /* s/g data entry */
2149 struct scatterlist *sgpe = NULL; /* s/g prot entry */
6c8eea54
JS
2150 struct lpfc_pde5 *pde5 = NULL;
2151 struct lpfc_pde6 *pde6 = NULL;
7f86059a 2152 struct lpfc_pde7 *pde7 = NULL;
e2a0a9d6
JS
2153 dma_addr_t dataphysaddr, protphysaddr;
2154 unsigned short curr_data = 0, curr_prot = 0;
7f86059a
JS
2155 unsigned int split_offset;
2156 unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
e2a0a9d6
JS
2157 unsigned int protgrp_blks, protgrp_bytes;
2158 unsigned int remainder, subtotal;
6c8eea54 2159 int status;
e2a0a9d6
JS
2160 int datadir = sc->sc_data_direction;
2161 unsigned char pgdone = 0, alldone = 0;
2162 unsigned blksize;
0829a19a 2163#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
acd6859b 2164 uint32_t rc;
0829a19a 2165#endif
acd6859b 2166 uint32_t checking = 1;
e2a0a9d6 2167 uint32_t reftag;
6c8eea54 2168 uint8_t txop, rxop;
e2a0a9d6
JS
2169 int num_bde = 0;
2170
2171 sgpe = scsi_prot_sglist(sc);
2172 sgde = scsi_sglist(sc);
2173
2174 if (!sgpe || !sgde) {
2175 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
acd6859b
JS
2176 "9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
2177 sgpe, sgde);
2178 return 0;
2179 }
2180
2181 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2182 if (status)
2183 goto out;
2184
2185 /* extract some info from the scsi command */
2186 blksize = lpfc_cmd_blksize(sc);
2187 reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
2188
2189#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4ac9b226 2190 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
acd6859b 2191 if (rc) {
9a6b09c0 2192 if (rc & BG_ERR_SWAP)
acd6859b 2193 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
9a6b09c0 2194 if (rc & BG_ERR_CHECK)
acd6859b
JS
2195 checking = 0;
2196 }
2197#endif
2198
2199 split_offset = 0;
2200 do {
96f7077f
JS
2201 /* Check to see if we ran out of space */
2202 if (num_bde >= (phba->cfg_total_seg_cnt - 2))
2203 return num_bde + 3;
2204
acd6859b
JS
2205 /* setup PDE5 with what we have */
2206 pde5 = (struct lpfc_pde5 *) bpl;
2207 memset(pde5, 0, sizeof(struct lpfc_pde5));
2208 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
2209
2210 /* Endianness conversion if necessary for PDE5 */
2211 pde5->word0 = cpu_to_le32(pde5->word0);
2212 pde5->reftag = cpu_to_le32(reftag);
2213
2214 /* advance bpl and increment bde count */
2215 num_bde++;
2216 bpl++;
2217 pde6 = (struct lpfc_pde6 *) bpl;
2218
2219 /* setup PDE6 with the rest of the info */
2220 memset(pde6, 0, sizeof(struct lpfc_pde6));
2221 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
2222 bf_set(pde6_optx, pde6, txop);
2223 bf_set(pde6_oprx, pde6, rxop);
2224 bf_set(pde6_ce, pde6, checking);
2225 bf_set(pde6_re, pde6, checking);
2226 bf_set(pde6_ai, pde6, 1);
2227 bf_set(pde6_ae, pde6, 0);
2228 bf_set(pde6_apptagval, pde6, 0);
2229
2230 /* Endianness conversion if necessary for PDE6 */
2231 pde6->word0 = cpu_to_le32(pde6->word0);
2232 pde6->word1 = cpu_to_le32(pde6->word1);
2233 pde6->word2 = cpu_to_le32(pde6->word2);
2234
2235 /* advance bpl and increment bde count */
2236 num_bde++;
2237 bpl++;
2238
2239 /* setup the first BDE that points to protection buffer */
2240 protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
2241 protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
2242
2243 /* must be integer multiple of the DIF block length */
2244 BUG_ON(protgroup_len % 8);
2245
2246 pde7 = (struct lpfc_pde7 *) bpl;
2247 memset(pde7, 0, sizeof(struct lpfc_pde7));
2248 bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
2249
2250 pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
2251 pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
2252
2253 protgrp_blks = protgroup_len / 8;
2254 protgrp_bytes = protgrp_blks * blksize;
2255
2256 /* check if this pde is crossing the 4K boundary; if so split */
2257 if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
2258 protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
2259 protgroup_offset += protgroup_remainder;
2260 protgrp_blks = protgroup_remainder / 8;
2261 protgrp_bytes = protgrp_blks * blksize;
2262 } else {
2263 protgroup_offset = 0;
2264 curr_prot++;
2265 }
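		/*
		 * Worked example (editor's illustration, hypothetical
		 * address): a protection buffer at offset 0xfc0 within
		 * its 4K page with protgroup_len = 0x80 would cross the
		 * boundary, so protgroup_remainder = 0x1000 - 0xfc0 =
		 * 0x40; this pass covers 0x40 / 8 = 8 DIF tuples
		 * (8 * blksize data bytes), and the loop re-enters with
		 * protgroup_offset advanced and reftag bumped by
		 * protgrp_blks to emit the remainder as a new protection
		 * group.
		 */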
2266
2267 num_bde++;
2268
2269 /* setup BDE's for data blocks associated with DIF data */
2270 pgdone = 0;
2271 subtotal = 0; /* total bytes processed for current prot grp */
2272 while (!pgdone) {
96f7077f
JS
2273 /* Check to see if we ran out of space */
2274 if (num_bde >= phba->cfg_total_seg_cnt)
2275 return num_bde + 1;
2276
acd6859b
JS
2277 if (!sgde) {
2278 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2279 "9065 BLKGRD:%s Invalid data segment\n",
2280 __func__);
2281 return 0;
2282 }
2283 bpl++;
2284 dataphysaddr = sg_dma_address(sgde) + split_offset;
2285 bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
2286 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
2287
2288 remainder = sg_dma_len(sgde) - split_offset;
2289
2290 if ((subtotal + remainder) <= protgrp_bytes) {
2291 /* we can use this whole buffer */
2292 bpl->tus.f.bdeSize = remainder;
2293 split_offset = 0;
2294
2295 if ((subtotal + remainder) == protgrp_bytes)
2296 pgdone = 1;
2297 } else {
2298 /* must split this buffer with next prot grp */
2299 bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
2300 split_offset += bpl->tus.f.bdeSize;
2301 }
2302
2303 subtotal += bpl->tus.f.bdeSize;
2304
2305 if (datadir == DMA_TO_DEVICE)
2306 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2307 else
2308 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2309 bpl->tus.w = le32_to_cpu(bpl->tus.w);
2310
2311 num_bde++;
2312 curr_data++;
2313
2314 if (split_offset)
2315 break;
2316
2317 /* Move to the next s/g segment if possible */
2318 sgde = sg_next(sgde);
2319
2320 }
2321
2322 if (protgroup_offset) {
2323 /* update the reference tag */
2324 reftag += protgrp_blks;
2325 bpl++;
2326 continue;
2327 }
2328
2329 /* are we done ? */
2330 if (curr_prot == protcnt) {
2331 alldone = 1;
2332 } else if (curr_prot < protcnt) {
2333 /* advance to next prot buffer */
2334 sgpe = sg_next(sgpe);
2335 bpl++;
2336
2337 /* update the reference tag */
2338 reftag += protgrp_blks;
2339 } else {
2340 /* if we're here, we have a bug */
2341 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2342 "9054 BLKGRD: bug in %s\n", __func__);
2343 }
2344
2345 } while (!alldone);
2346out:
2347
2348 return num_bde;
2349}
2350
2351/**
2352 * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
2353 * @phba: The Hba for which this call is being executed.
2354 * @sc: pointer to scsi command we're working on
2355 * @sgl: pointer to buffer list for protection groups
2356 * @datasegcnt: number of segments of data that have been dma mapped
2357 *
2358 * This function sets up SGL buffer list for protection groups of
2359 * type LPFC_PG_TYPE_NO_DIF
2360 *
2361 * This is usually used when the HBA is instructed to generate
2362 * DIFs and insert them into the data stream (or strip DIF from
2363 * the incoming data stream)
2364 *
2365 * The buffer list consists of just one protection group described
2366 * below:
2367 * +-------------------------+
2368 * start of prot group --> | DI_SEED |
2369 * +-------------------------+
2370 * | Data SGE |
2371 * +-------------------------+
2372 * |more Data SGE's ... (opt)|
2373 * +-------------------------+
2374 *
2375 *
2376 * Note: Data s/g buffers have been dma mapped
2377 *
2378 * Returns the number of SGEs added to the SGL.
2379 **/
2380static int
2381lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2382 struct sli4_sge *sgl, int datasegcnt)
2383{
2384 struct scatterlist *sgde = NULL; /* s/g data entry */
2385 struct sli4_sge_diseed *diseed = NULL;
2386 dma_addr_t physaddr;
2387 int i = 0, num_sge = 0, status;
2388 int datadir = sc->sc_data_direction;
2389 uint32_t reftag;
2390 unsigned blksize;
2391 uint8_t txop, rxop;
0829a19a 2392#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
acd6859b 2393 uint32_t rc;
0829a19a 2394#endif
acd6859b
JS
2395 uint32_t checking = 1;
2396 uint32_t dma_len;
2397 uint32_t dma_offset = 0;
2398
2399 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2400 if (status)
2401 goto out;
2402
2403 /* extract some info from the scsi command for pde*/
2404 blksize = lpfc_cmd_blksize(sc);
2405 reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
2406
2407#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4ac9b226 2408 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
acd6859b 2409 if (rc) {
9a6b09c0 2410 if (rc & BG_ERR_SWAP)
acd6859b 2411 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
9a6b09c0 2412 if (rc & BG_ERR_CHECK)
acd6859b
JS
2413 checking = 0;
2414 }
2415#endif
2416
2417 /* setup DISEED with what we have */
2418 diseed = (struct sli4_sge_diseed *) sgl;
2419 memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2420 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2421
2422 /* Endianness conversion if necessary */
2423 diseed->ref_tag = cpu_to_le32(reftag);
2424 diseed->ref_tag_tran = diseed->ref_tag;
2425
2426 /* setup DISEED with the rest of the info */
2427 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2428 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2429 if (datadir == DMA_FROM_DEVICE) {
2430 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2431 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2432 }
2433 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2434 bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2435
2436 /* Endianness conversion if necessary for DISEED */
2437 diseed->word2 = cpu_to_le32(diseed->word2);
2438 diseed->word3 = cpu_to_le32(diseed->word3);
2439
2440 /* advance bpl and increment sge count */
2441 num_sge++;
2442 sgl++;
2443
2444 /* assumption: caller has already run dma_map_sg on command data */
2445 scsi_for_each_sg(sc, sgde, datasegcnt, i) {
2446 physaddr = sg_dma_address(sgde);
2447 dma_len = sg_dma_len(sgde);
2448 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
2449 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
2450 if ((i + 1) == datasegcnt)
2451 bf_set(lpfc_sli4_sge_last, sgl, 1);
2452 else
2453 bf_set(lpfc_sli4_sge_last, sgl, 0);
2454 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2455 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2456
2457 sgl->sge_len = cpu_to_le32(dma_len);
2458 dma_offset += dma_len;
2459
2460 sgl++;
2461 num_sge++;
2462 }
2463
2464out:
2465 return num_sge;
2466}
2467
2468/**
2469 * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
2470 * @phba: The Hba for which this call is being executed.
2471 * @sc: pointer to scsi command we're working on
2472 * @sgl: pointer to buffer list for protection groups
2473 * @datacnt: number of segments of data that have been dma mapped
2474 * @protcnt: number of segments of protection data that have been dma mapped
2475 *
2476 * This function sets up SGL buffer list for protection groups of
2477 * type LPFC_PG_TYPE_DIF
2478 *
2479 * This is usually used when DIFs are in their own buffers,
2480 * separate from the data. The HBA can then be instructed
2481 * to place the DIFs in the outgoing stream. For read operations,
2482 * the HBA could extract the DIFs and place them in DIF buffers.
2483 *
2484 * The buffer list for this type consists of one or more of the
2485 * protection groups described below:
2486 * +-------------------------+
2487 * start of first prot group --> | DISEED |
2488 * +-------------------------+
2489 * | DIF (Prot SGE) |
2490 * +-------------------------+
2491 * | Data SGE |
2492 * +-------------------------+
2493 * |more Data SGE's ... (opt)|
2494 * +-------------------------+
2495 * start of new prot group --> | DISEED |
2496 * +-------------------------+
2497 * | ... |
2498 * +-------------------------+
2499 *
2500 * Note: It is assumed that both data and protection s/g buffers have been
2501 * mapped for DMA
2502 *
2503 * Returns the number of SGEs added to the SGL.
2504 **/
2505static int
2506lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2507 struct sli4_sge *sgl, int datacnt, int protcnt)
2508{
2509 struct scatterlist *sgde = NULL; /* s/g data entry */
2510 struct scatterlist *sgpe = NULL; /* s/g prot entry */
2511 struct sli4_sge_diseed *diseed = NULL;
2512 dma_addr_t dataphysaddr, protphysaddr;
2513 unsigned short curr_data = 0, curr_prot = 0;
2514 unsigned int split_offset;
2515 unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
2516 unsigned int protgrp_blks, protgrp_bytes;
2517 unsigned int remainder, subtotal;
2518 int status;
2519 unsigned char pgdone = 0, alldone = 0;
2520 unsigned blksize;
2521 uint32_t reftag;
2522 uint8_t txop, rxop;
2523 uint32_t dma_len;
0829a19a 2524#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
acd6859b 2525 uint32_t rc;
0829a19a 2526#endif
acd6859b
JS
2527 uint32_t checking = 1;
2528 uint32_t dma_offset = 0;
2529 int num_sge = 0;
2530
2531 sgpe = scsi_prot_sglist(sc);
2532 sgde = scsi_sglist(sc);
2533
2534 if (!sgpe || !sgde) {
2535 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2536 "9082 Invalid s/g entry: data=0x%p prot=0x%p\n",
e2a0a9d6
JS
2537 sgpe, sgde);
2538 return 0;
2539 }
2540
6c8eea54
JS
2541 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2542 if (status)
e2a0a9d6
JS
2543 goto out;
2544
6c8eea54 2545 /* extract some info from the scsi command */
e2a0a9d6 2546 blksize = lpfc_cmd_blksize(sc);
acd6859b 2547 reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
e2a0a9d6 2548
f9bb2da1 2549#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4ac9b226 2550 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
acd6859b 2551 if (rc) {
9a6b09c0 2552 if (rc & BG_ERR_SWAP)
acd6859b 2553 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
9a6b09c0 2554 if (rc & BG_ERR_CHECK)
acd6859b
JS
2555 checking = 0;
2556 }
f9bb2da1
JS
2557#endif
2558
e2a0a9d6
JS
2559 split_offset = 0;
2560 do {
96f7077f
JS
2561 /* Check to see if we ran out of space */
2562 if (num_sge >= (phba->cfg_total_seg_cnt - 2))
2563 return num_sge + 3;
2564
acd6859b
JS
2565 /* setup DISEED with what we have */
2566 diseed = (struct sli4_sge_diseed *) sgl;
2567 memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2568 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2569
2570 /* Endianness conversion if necessary */
2571 diseed->ref_tag = cpu_to_le32(reftag);
2572 diseed->ref_tag_tran = diseed->ref_tag;
2573
2574 /* setup DISEED with the rest of the info */
2575 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2576 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2577 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2578 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2579 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2580 bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2581
2582 /* Endianness conversion if necessary for DISEED */
2583 diseed->word2 = cpu_to_le32(diseed->word2);
2584 diseed->word3 = cpu_to_le32(diseed->word3);
2585
2586 /* advance sgl and increment bde count */
2587 num_sge++;
2588 sgl++;
e2a0a9d6
JS
2589
2590 /* setup the first BDE that points to protection buffer */
7f86059a
JS
2591 protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
2592 protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
e2a0a9d6 2593
e2a0a9d6
JS
2594 /* must be integer multiple of the DIF block length */
2595 BUG_ON(protgroup_len % 8);
2596
acd6859b
JS
2597 /* Now setup DIF SGE */
2598 sgl->word2 = 0;
2599 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
2600 sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
2601 sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
2602 sgl->word2 = cpu_to_le32(sgl->word2);
7f86059a 2603
e2a0a9d6
JS
2604 protgrp_blks = protgroup_len / 8;
2605 protgrp_bytes = protgrp_blks * blksize;
2606
acd6859b
JS
2607 /* check if DIF SGE is crossing the 4K boundary; if so split */
2608 if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
2609 protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
7f86059a
JS
2610 protgroup_offset += protgroup_remainder;
2611 protgrp_blks = protgroup_remainder / 8;
7c56b9fd 2612 protgrp_bytes = protgrp_blks * blksize;
7f86059a
JS
2613 } else {
2614 protgroup_offset = 0;
2615 curr_prot++;
2616 }
e2a0a9d6 2617
acd6859b 2618 num_sge++;
e2a0a9d6 2619
acd6859b 2620 /* setup SGE's for data blocks associated with DIF data */
e2a0a9d6
JS
2621 pgdone = 0;
2622 subtotal = 0; /* total bytes processed for current prot grp */
2623 while (!pgdone) {
96f7077f
JS
2624 /* Check to see if we ran out of space */
2625 if (num_sge >= phba->cfg_total_seg_cnt)
2626 return num_sge + 1;
2627
e2a0a9d6 2628 if (!sgde) {
6a9c52cf 2629 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
acd6859b 2630 "9086 BLKGRD:%s Invalid data segment\n",
e2a0a9d6
JS
2631 __func__);
2632 return 0;
2633 }
acd6859b 2634 sgl++;
e2a0a9d6 2635 dataphysaddr = sg_dma_address(sgde) + split_offset;
e2a0a9d6
JS
2636
2637 remainder = sg_dma_len(sgde) - split_offset;
2638
2639 if ((subtotal + remainder) <= protgrp_bytes) {
2640 /* we can use this whole buffer */
acd6859b 2641 dma_len = remainder;
e2a0a9d6
JS
2642 split_offset = 0;
2643
2644 if ((subtotal + remainder) == protgrp_bytes)
2645 pgdone = 1;
2646 } else {
2647 /* must split this buffer with next prot grp */
acd6859b
JS
2648 dma_len = protgrp_bytes - subtotal;
2649 split_offset += dma_len;
e2a0a9d6
JS
2650 }
2651
acd6859b 2652 subtotal += dma_len;
e2a0a9d6 2653
acd6859b
JS
2654 sgl->addr_lo = cpu_to_le32(putPaddrLow(dataphysaddr));
2655 sgl->addr_hi = cpu_to_le32(putPaddrHigh(dataphysaddr));
2656 bf_set(lpfc_sli4_sge_last, sgl, 0);
2657 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2658 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
e2a0a9d6 2659
acd6859b
JS
2660 sgl->sge_len = cpu_to_le32(dma_len);
2661 dma_offset += dma_len;
2662
2663 num_sge++;
e2a0a9d6
JS
2664 curr_data++;
2665
2666 if (split_offset)
2667 break;
2668
2669 /* Move to the next s/g segment if possible */
2670 sgde = sg_next(sgde);
2671 }
2672
7f86059a
JS
2673 if (protgroup_offset) {
2674 /* update the reference tag */
2675 reftag += protgrp_blks;
acd6859b 2676 sgl++;
7f86059a
JS
2677 continue;
2678 }
2679
e2a0a9d6
JS
2680 /* are we done ? */
2681 if (curr_prot == protcnt) {
acd6859b 2682 bf_set(lpfc_sli4_sge_last, sgl, 1);
e2a0a9d6
JS
2683 alldone = 1;
2684 } else if (curr_prot < protcnt) {
2685 /* advance to next prot buffer */
2686 sgpe = sg_next(sgpe);
acd6859b 2687 sgl++;
e2a0a9d6
JS
2688
2689 /* update the reference tag */
2690 reftag += protgrp_blks;
2691 } else {
2692 /* if we're here, we have a bug */
6a9c52cf 2693 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
acd6859b 2694 "9085 BLKGRD: bug in %s\n", __func__);
e2a0a9d6
JS
2695 }
2696
2697 } while (!alldone);
acd6859b 2698
e2a0a9d6
JS
2699out:
2700
acd6859b 2701 return num_sge;
e2a0a9d6 2702}
7f86059a 2703
acd6859b
JS
2704/**
2705 * lpfc_prot_group_type - Get protection group type of SCSI command
2706 * @phba: The Hba for which this call is being executed.
2707 * @sc: pointer to scsi command we're working on
2708 *
e2a0a9d6
JS
2709 * Given a SCSI command that supports DIF, determine composition of protection
2710 * groups involved in setting up buffer lists
2711 *
acd6859b
JS
2712 * Returns: Protection group type (with or without DIF)
2713 *
2714 **/
e2a0a9d6
JS
2715static int
2716lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
2717{
2718 int ret = LPFC_PG_TYPE_INVALID;
2719 unsigned char op = scsi_get_prot_op(sc);
2720
2721 switch (op) {
2722 case SCSI_PROT_READ_STRIP:
2723 case SCSI_PROT_WRITE_INSERT:
2724 ret = LPFC_PG_TYPE_NO_DIF;
2725 break;
2726 case SCSI_PROT_READ_INSERT:
2727 case SCSI_PROT_WRITE_STRIP:
2728 case SCSI_PROT_READ_PASS:
2729 case SCSI_PROT_WRITE_PASS:
e2a0a9d6
JS
2730 ret = LPFC_PG_TYPE_DIF_BUF;
2731 break;
2732 default:
2733 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2734 "9021 Unsupported protection op:%d\n", op);
2735 break;
2736 }
2737
2738 return ret;
2739}
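
/*
 * Example (editor's illustration): READ_STRIP and WRITE_INSERT leave
 * all DIF handling to the HBA, so only data buffers are presented
 * (LPFC_PG_TYPE_NO_DIF); the *_PASS ops plus READ_INSERT/WRITE_STRIP
 * carry a separate protection scatterlist and map to
 * LPFC_PG_TYPE_DIF_BUF.
 */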
2740
acd6859b
JS
2741/**
2742 * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
2743 * @phba: The Hba for which this call is being executed.
2744 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
2745 *
e2a0a9d6
JS
2746 * This is the protection/DIF aware version of
2747 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
2748 * two functions eventually, but for now, it's here
acd6859b 2749 **/
e2a0a9d6 2750static int
acd6859b 2751lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
e2a0a9d6
JS
2752 struct lpfc_scsi_buf *lpfc_cmd)
2753{
2754 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2755 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2756 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
2757 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2758 uint32_t num_bde = 0;
2759 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
2760 int prot_group_type = 0;
2761 int diflen, fcpdl;
2762 unsigned blksize;
2763
2764 /*
2765 * Start the lpfc command prep by bumping the bpl beyond the fcp_cmnd
2766 * and fcp_rsp regions to the first data bde entry
2767 */
2768 bpl += 2;
2769 if (scsi_sg_count(scsi_cmnd)) {
2770 /*
2771 * The driver stores the segment count returned from pci_map_sg
2772 * because this is a count of dma-mappings used to map the use_sg
2773 * pages. They are not guaranteed to be the same for those
2774 * architectures that implement an IOMMU.
2775 */
2776 datasegcnt = dma_map_sg(&phba->pcidev->dev,
2777 scsi_sglist(scsi_cmnd),
2778 scsi_sg_count(scsi_cmnd), datadir);
2779 if (unlikely(!datasegcnt))
2780 return 1;
2781
2782 lpfc_cmd->seg_cnt = datasegcnt;
96f7077f
JS
2783
2784 /* First check if data segment count from SCSI Layer is good */
2785 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
2786 goto err;
e2a0a9d6
JS
2787
2788 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
2789
2790 switch (prot_group_type) {
2791 case LPFC_PG_TYPE_NO_DIF:
96f7077f
JS
2792
2793 /* Here we need to add a PDE5 and PDE6 to the count */
2794 if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt)
2795 goto err;
2796
e2a0a9d6
JS
2797 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
2798 datasegcnt);
c9404c9c 2799 /* we should have 2 or more entries in buffer list */
e2a0a9d6
JS
2800 if (num_bde < 2)
2801 goto err;
2802 break;
96f7077f
JS
2803
2804 case LPFC_PG_TYPE_DIF_BUF:
e2a0a9d6
JS
2805 /*
2806 * This type indicates that protection buffers are
2807 * passed to the driver, so that needs to be prepared
2808 * for DMA
2809 */
2810 protsegcnt = dma_map_sg(&phba->pcidev->dev,
2811 scsi_prot_sglist(scsi_cmnd),
2812 scsi_prot_sg_count(scsi_cmnd), datadir);
2813 if (unlikely(!protsegcnt)) {
2814 scsi_dma_unmap(scsi_cmnd);
2815 return 1;
2816 }
2817
2818 lpfc_cmd->prot_seg_cnt = protsegcnt;
96f7077f
JS
2819
2820 /*
2821 * There is a minimum of 4 BPLs used for every
2822 * protection data segment.
2823 */
2824 if ((lpfc_cmd->prot_seg_cnt * 4) >
2825 (phba->cfg_total_seg_cnt - 2))
2826 goto err;
e2a0a9d6
JS
2827
2828 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
2829 datasegcnt, protsegcnt);
c9404c9c 2830 /* we should have 3 or more entries in buffer list */
96f7077f
JS
2831 if ((num_bde < 3) ||
2832 (num_bde > phba->cfg_total_seg_cnt))
e2a0a9d6
JS
2833 goto err;
2834 break;
96f7077f 2835
e2a0a9d6
JS
2836 case LPFC_PG_TYPE_INVALID:
2837 default:
96f7077f
JS
2838 scsi_dma_unmap(scsi_cmnd);
2839 lpfc_cmd->seg_cnt = 0;
2840
e2a0a9d6
JS
2841 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2842 "9022 Unexpected protection group %i\n",
2843 prot_group_type);
2844 return 1;
2845 }
2846 }
2847
2848 /*
2849 * Finish initializing those IOCB fields that are dependent on the
2850 * scsi_cmnd request_buffer. Note that the bdeSize is explicitly
2851 * reinitialized since all iocb memory resources are used many times
2852 * for transmit, receive, and continuation bpl's.
2853 */
2854 iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
2855 iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
2856 iocb_cmd->ulpBdeCount = 1;
2857 iocb_cmd->ulpLe = 1;
2858
2859 fcpdl = scsi_bufflen(scsi_cmnd);
2860
2861 if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) {
2862 /*
2863 * We are in DIF Type 1 mode
2864 * Every data block has an 8 byte DIF (trailer)
2865 * attached to it. Must adjust FCP data length
2866 */
2867 blksize = lpfc_cmd_blksize(scsi_cmnd);
2868 diflen = (fcpdl / blksize) * 8;
2869 fcpdl += diflen;
2870 }
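	/*
	 * Worked example (editor's illustration): a 4096-byte transfer
	 * with 512-byte logical blocks grows by (4096 / 512) * 8 = 64
	 * bytes of DIF trailers, so fcpDl is reported as 4160 while the
	 * midlayer buffer length stays 4096.
	 */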
2871 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
2872
2873 /*
2874 * Due to difference in data length between DIF/non-DIF paths,
2875 * we need to set word 4 of IOCB here
2876 */
2877 iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
2878
dea3101e 2879 return 0;
e2a0a9d6 2880err:
96f7077f
JS
2881 if (lpfc_cmd->seg_cnt)
2882 scsi_dma_unmap(scsi_cmnd);
2883 if (lpfc_cmd->prot_seg_cnt)
2884 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
2885 scsi_prot_sg_count(scsi_cmnd),
2886 scsi_cmnd->sc_data_direction);
2887
e2a0a9d6 2888 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
96f7077f
JS
2889 "9023 Cannot setup S/G List for HBA"
2890 "IO segs %d/%d BPL %d SCSI %d: %d %d\n",
2891 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
2892 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
e2a0a9d6 2893 prot_group_type, num_bde);
96f7077f
JS
2894
2895 lpfc_cmd->seg_cnt = 0;
2896 lpfc_cmd->prot_seg_cnt = 0;
e2a0a9d6
JS
2897 return 1;
2898}
2899
737d4248
JS
2900/*
2901 * This function calculates the T10 DIF guard tag
2902 * on the specified data using the CRC algorithm
2903 * provided by crc_t10dif.
2904 */
2905uint16_t
2906lpfc_bg_crc(uint8_t *data, int count)
2907{
2908 uint16_t crc = 0;
2909 uint16_t x;
2910
2911 crc = crc_t10dif(data, count);
2912 x = cpu_to_be16(crc);
2913 return x;
2914}
2915
2916/*
2917 * This function calculates the T10 DIF guard tag
2918 * on the specified data using the IP checksum algorithm
2919 * provided by ip_compute_csum.
2920 */
2921uint16_t
2922lpfc_bg_csum(uint8_t *data, int count)
2923{
2924 uint16_t ret;
2925
2926 ret = ip_compute_csum(data, count);
2927 return ret;
2928}
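
/*
 * Usage sketch (mirrors lpfc_calc_bg_err() below): the helper matching
 * the host's guard type recomputes the guard over one logical block and
 * the result is compared against the tuple's stored guard_tag:
 *
 *	if (guard_type == SHOST_DIX_GUARD_IP)
 *		sum = lpfc_bg_csum(data_src, blksize);
 *	else
 *		sum = lpfc_bg_crc(data_src, blksize);
 *	if (guard_tag != sum)
 *		err_type = BGS_GUARD_ERR_MASK;
 */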
2929
2930/*
2931 * This function examines the protection data to try to determine
2932 * what type of T10-DIF error occurred.
2933 */
2934void
2935lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
2936{
2937 struct scatterlist *sgpe; /* s/g prot entry */
2938 struct scatterlist *sgde; /* s/g data entry */
2939 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2940 struct scsi_dif_tuple *src = NULL;
2941 uint8_t *data_src = NULL;
2942 uint16_t guard_tag, guard_type;
2943 uint16_t start_app_tag, app_tag;
2944 uint32_t start_ref_tag, ref_tag;
2945 int prot, protsegcnt;
2946 int err_type, len, data_len;
2947 int chk_ref, chk_app, chk_guard;
2948 uint16_t sum;
2949 unsigned blksize;
2950
2951 err_type = BGS_GUARD_ERR_MASK;
2952 sum = 0;
2953 guard_tag = 0;
2954
2955 /* First check to see if there is protection data to examine */
2956 prot = scsi_get_prot_op(cmd);
2957 if ((prot == SCSI_PROT_READ_STRIP) ||
2958 (prot == SCSI_PROT_WRITE_INSERT) ||
2959 (prot == SCSI_PROT_NORMAL))
2960 goto out;
2961
2962 /* Currently the driver just supports ref_tag and guard_tag checking */
2963 chk_ref = 1;
2964 chk_app = 0;
2965 chk_guard = 0;
2966
2967 /* Setup a ptr to the protection data provided by the SCSI host */
2968 sgpe = scsi_prot_sglist(cmd);
2969 protsegcnt = lpfc_cmd->prot_seg_cnt;
2970
2971 if (sgpe && protsegcnt) {
2972
2973 /*
2974 * We will only try to verify guard tag if the segment
2975 * data length is a multiple of the blksize.
2976 */
2977 sgde = scsi_sglist(cmd);
2978 blksize = lpfc_cmd_blksize(cmd);
2979 data_src = (uint8_t *)sg_virt(sgde);
2980 data_len = sgde->length;
2981 if ((data_len & (blksize - 1)) == 0)
2982 chk_guard = 1;
2983 guard_type = scsi_host_get_guard(cmd->device->host);
2984
2985 start_ref_tag = scsi_get_lba(cmd);
2986 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
2987 start_app_tag = src->app_tag;
2988 len = sgpe->length;
2989 while (src && protsegcnt) {
2990 while (len) {
2991
2992 /*
2993 * First check to see if a protection data
2994 * check is valid
2995 */
2996 if ((src->ref_tag == 0xffffffff) ||
2997 (src->app_tag == 0xffff)) {
2998 start_ref_tag++;
2999 goto skipit;
3000 }
3001
3002 /* App Tag checking */
3003 app_tag = src->app_tag;
3004 if (chk_app && (app_tag != start_app_tag)) {
3005 err_type = BGS_APPTAG_ERR_MASK;
3006 goto out;
3007 }
3008
3009 /* Reference Tag checking */
3010 ref_tag = be32_to_cpu(src->ref_tag);
3011 if (chk_ref && (ref_tag != start_ref_tag)) {
3012 err_type = BGS_REFTAG_ERR_MASK;
3013 goto out;
3014 }
3015 start_ref_tag++;
3016
3017 /* Guard Tag checking */
3018 if (chk_guard) {
3019 guard_tag = src->guard_tag;
3020 if (guard_type == SHOST_DIX_GUARD_IP)
3021 sum = lpfc_bg_csum(data_src,
3022 blksize);
3023 else
3024 sum = lpfc_bg_crc(data_src,
3025 blksize);
3026 if ((guard_tag != sum)) {
3027 err_type = BGS_GUARD_ERR_MASK;
3028 goto out;
3029 }
3030 }
3031skipit:
3032 len -= sizeof(struct scsi_dif_tuple);
3033 if (len < 0)
3034 len = 0;
3035 src++;
3036
3037 data_src += blksize;
3038 data_len -= blksize;
3039
3040 /*
3041 * Are we at the end of the Data segment?
3042 * The data segment is only used for Guard
3043 * tag checking.
3044 */
3045 if (chk_guard && (data_len == 0)) {
3046 chk_guard = 0;
3047 sgde = sg_next(sgde);
3048 if (!sgde)
3049 goto out;
3050
3051 data_src = (uint8_t *)sg_virt(sgde);
3052 data_len = sgde->length;
3053 if ((data_len & (blksize - 1)) == 0)
3054 chk_guard = 1;
3055 }
3056 }
3057
3058 /* Goto the next Protection data segment */
3059 sgpe = sg_next(sgpe);
3060 if (sgpe) {
3061 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
3062 len = sgpe->length;
3063 } else {
3064 src = NULL;
3065 }
3066 protsegcnt--;
3067 }
3068 }
3069out:
3070 if (err_type == BGS_GUARD_ERR_MASK) {
3071 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3072 0x10, 0x1);
3073 cmd->result = DRIVER_SENSE << 24
3074 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
3075 phba->bg_guard_err_cnt++;
3076 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3077 "9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
3078 (unsigned long)scsi_get_lba(cmd),
3079 sum, guard_tag);
3080
3081 } else if (err_type == BGS_REFTAG_ERR_MASK) {
3082 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3083 0x10, 0x3);
3084 cmd->result = DRIVER_SENSE << 24
3085 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
3086
3087 phba->bg_reftag_err_cnt++;
3088 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3089 "9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
3090 (unsigned long)scsi_get_lba(cmd),
3091 ref_tag, start_ref_tag);
3092
3093 } else if (err_type == BGS_APPTAG_ERR_MASK) {
3094 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3095 0x10, 0x2);
3096 cmd->result = DRIVER_SENSE << 24
3097 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
3098
3099 phba->bg_apptag_err_cnt++;
3100 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3101 "9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
3102 (unsigned long)scsi_get_lba(cmd),
3103 app_tag, start_app_tag);
3104 }
3105}
3106
3107
e2a0a9d6
JS
3108/*
3109 * This function checks for BlockGuard errors detected by
3110 * the HBA. In case of errors, the ASC/ASCQ fields in the
3111 * sense buffer will be set accordingly, paired with
3112 * ILLEGAL_REQUEST to signal to the kernel that the HBA
3113 * detected corruption.
3114 *
3115 * Returns:
3116 * 0 - No error found
3117 * 1 - BlockGuard error found
3118 * -1 - Internal error (bad profile, ...etc)
3119 */
3120static int
3121lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
3122 struct lpfc_iocbq *pIocbOut)
3123{
3124 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
3125 struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
3126 int ret = 0;
3127 uint32_t bghm = bgf->bghm;
3128 uint32_t bgstat = bgf->bgstat;
3129 uint64_t failing_sector = 0;
3130
e2a0a9d6
JS
3131 spin_lock(&_dump_buf_lock);
3132 if (!_dump_buf_done) {
6a9c52cf
JS
3133 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving"
3134 " Data for %u blocks to debugfs\n",
e2a0a9d6 3135 (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
6a9c52cf 3136 lpfc_debug_save_data(phba, cmd);
e2a0a9d6
JS
3137
3138 /* If we have a prot sgl, save the DIF buffer */
3139 if (lpfc_prot_group_type(phba, cmd) ==
3140 LPFC_PG_TYPE_DIF_BUF) {
6a9c52cf
JS
3141 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
3142 "Saving DIF for %u blocks to debugfs\n",
3143 (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
3144 lpfc_debug_save_dif(phba, cmd);
e2a0a9d6
JS
3145 }
3146
3147 _dump_buf_done = 1;
3148 }
3149 spin_unlock(&_dump_buf_lock);
3150
3151 if (lpfc_bgs_get_invalid_prof(bgstat)) {
3152 cmd->result = ScsiResult(DID_ERROR, 0);
737d4248
JS
3153 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3154 "9072 BLKGRD: Invalid BG Profile in cmd"
3155 " 0x%x lba 0x%llx blk cnt 0x%x "
3156 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3157 (unsigned long long)scsi_get_lba(cmd),
3158 blk_rq_sectors(cmd->request), bgstat, bghm);
e2a0a9d6
JS
3159 ret = (-1);
3160 goto out;
3161 }
3162
3163 if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
3164 cmd->result = ScsiResult(DID_ERROR, 0);
737d4248
JS
3165 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3166 "9073 BLKGRD: Invalid BG PDIF Block in cmd"
3167 " 0x%x lba 0x%llx blk cnt 0x%x "
3168 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3169 (unsigned long long)scsi_get_lba(cmd),
3170 blk_rq_sectors(cmd->request), bgstat, bghm);
e2a0a9d6
JS
3171 ret = (-1);
3172 goto out;
3173 }
3174
3175 if (lpfc_bgs_get_guard_err(bgstat)) {
3176 ret = 1;
3177
3178 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3179 0x10, 0x1);
1c9fbafc 3180 cmd->result = DRIVER_SENSE << 24
e2a0a9d6
JS
3181 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
3182 phba->bg_guard_err_cnt++;
737d4248
JS
3183 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3184 "9055 BLKGRD: Guard Tag error in cmd"
3185 " 0x%x lba 0x%llx blk cnt 0x%x "
3186 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3187 (unsigned long long)scsi_get_lba(cmd),
3188 blk_rq_sectors(cmd->request), bgstat, bghm);
e2a0a9d6
JS
3189 }
3190
3191 if (lpfc_bgs_get_reftag_err(bgstat)) {
3192 ret = 1;
3193
3194 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3195 0x10, 0x3);
1c9fbafc 3196 cmd->result = DRIVER_SENSE << 24
e2a0a9d6
JS
3197 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
3198
3199 phba->bg_reftag_err_cnt++;
737d4248
JS
3200 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3201 "9056 BLKGRD: Ref Tag error in cmd"
3202 " 0x%x lba 0x%llx blk cnt 0x%x "
3203 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3204 (unsigned long long)scsi_get_lba(cmd),
3205 blk_rq_sectors(cmd->request), bgstat, bghm);
e2a0a9d6
JS
3206 }
3207
3208 if (lpfc_bgs_get_apptag_err(bgstat)) {
3209 ret = 1;
3210
3211 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3212 0x10, 0x2);
1c9fbafc 3213 cmd->result = DRIVER_SENSE << 24
e2a0a9d6
JS
3214 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
3215
3216 phba->bg_apptag_err_cnt++;
737d4248
JS
3217 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3218 "9061 BLKGRD: App Tag error in cmd"
3219 " 0x%x lba 0x%llx blk cnt 0x%x "
3220 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3221 (unsigned long long)scsi_get_lba(cmd),
3222 blk_rq_sectors(cmd->request), bgstat, bghm);
e2a0a9d6
JS
3223 }
3224
3225 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
3226 /*
3227 * setup sense data descriptor 0 per SPC-4 as an information
7c56b9fd
JS
3228 * field, and put the failing LBA in it.
3229 * This code assumes there was also a guard/app/ref tag error
3230 * indication.
e2a0a9d6 3231 */
7c56b9fd
JS
3232 cmd->sense_buffer[7] = 0xc; /* Additional sense length */
3233 cmd->sense_buffer[8] = 0; /* Information descriptor type */
3234 cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */
3235 cmd->sense_buffer[10] = 0x80; /* Validity bit */
acd6859b
JS
3236
3237 /* bghm is an "on the wire" FC frame based count */
3238 switch (scsi_get_prot_op(cmd)) {
3239 case SCSI_PROT_READ_INSERT:
3240 case SCSI_PROT_WRITE_STRIP:
3241 bghm /= cmd->device->sector_size;
3242 break;
3243 case SCSI_PROT_READ_STRIP:
3244 case SCSI_PROT_WRITE_INSERT:
3245 case SCSI_PROT_READ_PASS:
3246 case SCSI_PROT_WRITE_PASS:
3247 bghm /= (cmd->device->sector_size +
3248 sizeof(struct scsi_dif_tuple));
3249 break;
3250 }
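		/*
		 * Worked example (editor's illustration): with 512-byte
		 * sectors, READ_INSERT/WRITE_STRIP frames carry bare
		 * data, so bghm is divided by 512; for the other ops
		 * every block travels with its 8-byte DIF tuple, giving
		 * a divisor of 512 + 8 = 520.
		 */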
e2a0a9d6
JS
3251
3252 failing_sector = scsi_get_lba(cmd);
3253 failing_sector += bghm;
3254
7c56b9fd
JS
3255 /* Descriptor Information */
3256 put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
e2a0a9d6
JS
3257 }
3258
3259 if (!ret) {
3260 /* No error was reported - problem in FW? */
737d4248
JS
3261 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3262 "9057 BLKGRD: Unknown error in cmd"
3263 " 0x%x lba 0x%llx blk cnt 0x%x "
3264 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3265 (unsigned long long)scsi_get_lba(cmd),
3266 blk_rq_sectors(cmd->request), bgstat, bghm);
3267
3268 /* Calculate what type of error it was */
3269 lpfc_calc_bg_err(phba, lpfc_cmd);
e2a0a9d6 3270 }
e2a0a9d6
JS
3271out:
3272 return ret;
dea3101e 3273}
3274
da0436e9
JS
3275/**
3276 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3277 * @phba: The Hba for which this call is being executed.
3278 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3279 *
3280 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
3281 * field of @lpfc_cmd for device with SLI-4 interface spec.
3282 *
3283 * Return codes:
6c8eea54
JS
3284 * 1 - Error
3285 * 0 - Success
da0436e9
JS
3286 **/
3287static int
3288lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3289{
3290 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3291 struct scatterlist *sgel = NULL;
3292 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3293 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
fedd3b7b 3294 struct sli4_sge *first_data_sgl;
da0436e9
JS
3295 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
3296 dma_addr_t physaddr;
3297 uint32_t num_bde = 0;
3298 uint32_t dma_len;
3299 uint32_t dma_offset = 0;
3300 int nseg;
fedd3b7b 3301 struct ulp_bde64 *bde;
da0436e9
JS
3302
3303 /*
3304 * There are three possibilities here - use scatter-gather segment, use
3305 * the single mapping, or neither. Start the lpfc command prep by
3306 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
3307 * data bde entry.
3308 */
3309 if (scsi_sg_count(scsi_cmnd)) {
3310 /*
3311 * The driver stores the segment count returned from pci_map_sg
3312 * because this is a count of dma-mappings used to map the use_sg
3313 * pages. They are not guaranteed to be the same for those
3314 * architectures that implement an IOMMU.
3315 */
3316
3317 nseg = scsi_dma_map(scsi_cmnd);
3318 if (unlikely(!nseg))
3319 return 1;
3320 sgl += 1;
3321 /* clear the last flag in the fcp_rsp map entry */
3322 sgl->word2 = le32_to_cpu(sgl->word2);
3323 bf_set(lpfc_sli4_sge_last, sgl, 0);
3324 sgl->word2 = cpu_to_le32(sgl->word2);
3325 sgl += 1;
fedd3b7b 3326 first_data_sgl = sgl;
da0436e9
JS
3327 lpfc_cmd->seg_cnt = nseg;
3328 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
6a9c52cf
JS
3329 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
3330 " %s: Too many sg segments from "
3331 "dma_map_sg. Config %d, seg_cnt %d\n",
3332 __func__, phba->cfg_sg_seg_cnt,
da0436e9 3333 lpfc_cmd->seg_cnt);
96f7077f 3334 lpfc_cmd->seg_cnt = 0;
da0436e9
JS
3335 scsi_dma_unmap(scsi_cmnd);
3336 return 1;
3337 }
3338
3339 /*
3340 * The driver established a maximum scatter-gather segment count
3341 * during probe that limits the number of sg elements in any
3342 * single scsi command. Just run through the seg_cnt and format
3343 * the sge's.
3344 * When using SLI-3 the driver will try to fit all the BDEs into
3345 * the IOCB. If it can't then the BDEs get added to a BPL as it
3346 * does for SLI-2 mode.
3347 */
3348 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
3349 physaddr = sg_dma_address(sgel);
3350 dma_len = sg_dma_len(sgel);
da0436e9
JS
3351 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
3352 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
0558056c 3353 sgl->word2 = le32_to_cpu(sgl->word2);
da0436e9
JS
3354 if ((num_bde + 1) == nseg)
3355 bf_set(lpfc_sli4_sge_last, sgl, 1);
3356 else
3357 bf_set(lpfc_sli4_sge_last, sgl, 0);
3358 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
f9bb2da1 3359 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
da0436e9 3360 sgl->word2 = cpu_to_le32(sgl->word2);
28baac74 3361 sgl->sge_len = cpu_to_le32(dma_len);
da0436e9
JS
3362 dma_offset += dma_len;
3363 sgl++;
3364 }
fedd3b7b
JS
3365 /* setup the performance hint (first data BDE) if enabled */
3366 if (phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) {
3367 bde = (struct ulp_bde64 *)
3368 &(iocb_cmd->unsli3.sli3Words[5]);
3369 bde->addrLow = first_data_sgl->addr_lo;
3370 bde->addrHigh = first_data_sgl->addr_hi;
3371 bde->tus.f.bdeSize =
3372 le32_to_cpu(first_data_sgl->sge_len);
3373 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
3374 bde->tus.w = cpu_to_le32(bde->tus.w);
3375 }
da0436e9
JS
3376 } else {
3377 sgl += 1;
3378 /* clear the last flag in the fcp_rsp map entry */
3379 sgl->word2 = le32_to_cpu(sgl->word2);
3380 bf_set(lpfc_sli4_sge_last, sgl, 1);
3381 sgl->word2 = cpu_to_le32(sgl->word2);
3382 }
3383
3384 /*
3385 * Finish initializing those IOCB fields that are dependent on the
3386 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
3387 * explicitly reinitialized.
3388 * All iocb memory resources are reused.
3389 */
3390 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
3391
3392 /*
3393 * Due to difference in data length between DIF/non-DIF paths,
3394 * we need to set word 4 of IOCB here
3395 */
3396 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
3397 return 0;
3398}
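/*
 * Resulting SGL layout for a two-segment command built by the routine
 * above (illustrative):
 *
 *	sgl[0]	fcp_cmnd	pre-built entry, untouched here
 *	sgl[1]	fcp_rsp		LAST bit cleared
 *	sgl[2]	data seg 0	offset 0
 *	sgl[3]	data seg 1	offset = len(seg 0), LAST bit set
 *
 * When scsi_sg_count() is zero only the fcp_rsp entry is updated,
 * with its LAST bit set instead.
 */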
3399
acd6859b
JS
3400/**
3401 * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
3402 * @phba: The Hba for which this call is being executed.
3403 * @lpfc_cmd: The scsi buffer which is going to be adjusted.
3404 *
3405 * Adjust the data length to account for how much data
3406 * is actually on the wire.
3407 *
3408 * returns the adjusted data length
3409 **/
3410static int
3411lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
3412 struct lpfc_scsi_buf *lpfc_cmd)
3413{
3414 struct scsi_cmnd *sc = lpfc_cmd->pCmd;
3415 int diflen, fcpdl;
3416 unsigned blksize;
3417
3418 fcpdl = scsi_bufflen(sc);
3419
3420 /* Check if there is protection data on the wire */
3421 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
3422 /* Read */
3423 if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
3424 return fcpdl;
3425
3426 } else {
3427 /* Write */
3428 if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
3429 return fcpdl;
3430 }
3431
3432 /* If protection data on the wire, adjust the count accordingly */
3433 blksize = lpfc_cmd_blksize(sc);
3434 diflen = (fcpdl / blksize) * 8;
3435 fcpdl += diflen;
3436 return fcpdl;
3437}
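/*
 * Worked example (illustrative): a 4 KB write with protection data
 * on the wire and 512-byte logical blocks grows by one 8-byte DIF
 * tuple per block:
 *
 *	fcpdl  = 4096
 *	diflen = (4096 / 512) * 8 = 64
 *	fcpdl  = 4096 + 64 = 4160
 *
 * READ_INSERT and WRITE_STRIP return the unadjusted length because
 * no protection data crosses the wire in those modes.
 */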
3438
3439/**
3440 * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3441 * @phba: The Hba for which this call is being executed.
3442 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3443 *
3444 * This is the protection/DIF aware version of
3445 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
3446 * two functions eventually, but for now, it's here.
3447 **/
3448static int
3449lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3450 struct lpfc_scsi_buf *lpfc_cmd)
3451{
3452 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3453 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3454 struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);
3455 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
96f7077f 3456 uint32_t num_sge = 0;
acd6859b
JS
3457 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
3458 int prot_group_type = 0;
3459 int fcpdl;
3460
3461 /*
3462 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
96f7077f 3463 * fcp_rsp regions to the first data sge entry
acd6859b
JS
3464 */
3465 if (scsi_sg_count(scsi_cmnd)) {
3466 /*
3467 * The driver stores the segment count returned from pci_map_sg
3468 * because this is a count of dma-mappings used to map the use_sg
3469 * pages. They are not guaranteed to be the same for those
3470 * architectures that implement an IOMMU.
3471 */
3472 datasegcnt = dma_map_sg(&phba->pcidev->dev,
3473 scsi_sglist(scsi_cmnd),
3474 scsi_sg_count(scsi_cmnd), datadir);
3475 if (unlikely(!datasegcnt))
3476 return 1;
3477
3478 sgl += 1;
3479 /* clear the last flag in the fcp_rsp map entry */
3480 sgl->word2 = le32_to_cpu(sgl->word2);
3481 bf_set(lpfc_sli4_sge_last, sgl, 0);
3482 sgl->word2 = cpu_to_le32(sgl->word2);
3483
3484 sgl += 1;
3485 lpfc_cmd->seg_cnt = datasegcnt;
96f7077f
JS
3486
3487 /* First check if data segment count from SCSI Layer is good */
3488 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
3489 goto err;
acd6859b
JS
3490
3491 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
3492
3493 switch (prot_group_type) {
3494 case LPFC_PG_TYPE_NO_DIF:
96f7077f
JS
3495 /* Here we need to add a DISEED to the count */
3496 if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt)
3497 goto err;
3498
3499 num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
acd6859b 3500 datasegcnt);
96f7077f 3501
acd6859b 3502 /* we should have 2 or more entries in buffer list */
96f7077f 3503 if (num_sge < 2)
acd6859b
JS
3504 goto err;
3505 break;
96f7077f
JS
3506
3507 case LPFC_PG_TYPE_DIF_BUF:
acd6859b
JS
3508 /*
3509 * This type indicates that protection buffers are
3510 * passed to the driver, so that needs to be prepared
3511 * for DMA
3512 */
3513 protsegcnt = dma_map_sg(&phba->pcidev->dev,
3514 scsi_prot_sglist(scsi_cmnd),
3515 scsi_prot_sg_count(scsi_cmnd), datadir);
3516 if (unlikely(!protsegcnt)) {
3517 scsi_dma_unmap(scsi_cmnd);
3518 return 1;
3519 }
3520
3521 lpfc_cmd->prot_seg_cnt = protsegcnt;
96f7077f
JS
3522 /*
3523 * There is a minimum of 3 SGEs used for every
3524 * protection data segment.
3525 */
3526 if ((lpfc_cmd->prot_seg_cnt * 3) >
3527 (phba->cfg_total_seg_cnt - 2))
3528 goto err;
acd6859b 3529
96f7077f 3530 num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
acd6859b 3531 datasegcnt, protsegcnt);
96f7077f 3532
acd6859b 3533 /* we should have 3 or more entries in buffer list */
96f7077f
JS
3534 if ((num_sge < 3) ||
3535 (num_sge > phba->cfg_total_seg_cnt))
acd6859b
JS
3536 goto err;
3537 break;
96f7077f 3538
acd6859b
JS
3539 case LPFC_PG_TYPE_INVALID:
3540 default:
96f7077f
JS
3541 scsi_dma_unmap(scsi_cmnd);
3542 lpfc_cmd->seg_cnt = 0;
3543
acd6859b
JS
3544 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
3545 "9083 Unexpected protection group %i\n",
3546 prot_group_type);
3547 return 1;
3548 }
3549 }
3550
8012cc38
JS
3551 switch (scsi_get_prot_op(scsi_cmnd)) {
3552 case SCSI_PROT_WRITE_STRIP:
3553 case SCSI_PROT_READ_STRIP:
3554 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
3555 break;
3556 case SCSI_PROT_WRITE_INSERT:
3557 case SCSI_PROT_READ_INSERT:
3558 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
3559 break;
3560 case SCSI_PROT_WRITE_PASS:
3561 case SCSI_PROT_READ_PASS:
3562 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
3563 break;
3564 }
3565
acd6859b
JS
3566 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
3567
3568 fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);
3569
3570 /*
3571 * Due to difference in data length between DIF/non-DIF paths,
3572 * we need to set word 4 of IOCB here
3573 */
3574 iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
acd6859b
JS
3575
3576 return 0;
3577err:
96f7077f
JS
3578 if (lpfc_cmd->seg_cnt)
3579 scsi_dma_unmap(scsi_cmnd);
3580 if (lpfc_cmd->prot_seg_cnt)
3581 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
3582 scsi_prot_sg_count(scsi_cmnd),
3583 scsi_cmnd->sc_data_direction);
3584
acd6859b 3585 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
96f7077f
JS
3586 "9084 Cannot setup S/G List for HBA"
3587 "IO segs %d/%d SGL %d SCSI %d: %d %d\n",
3588 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
3589 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
3590 prot_group_type, num_sge);
3591
3592 lpfc_cmd->seg_cnt = 0;
3593 lpfc_cmd->prot_seg_cnt = 0;
acd6859b
JS
3594 return 1;
3595}
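/*
 * SGE budget example (illustrative): with cfg_total_seg_cnt = 64,
 * two entries are reserved for fcp_cmnd/fcp_rsp, so the DIF_BUF
 * check above admits at most (64 - 2) / 3 = 20 protection segments,
 * each protection segment possibly expanding to a DISEED SGE plus a
 * DIF/data SGE pair.
 */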
3596
3772a991
JS
3597/**
3598 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3599 * @phba: The Hba for which this call is being executed.
3600 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3601 *
3602 * This routine wraps the actual DMA mapping function pointer from the
3603 * lpfc_hba struct.
3604 *
3605 * Return codes:
6c8eea54
JS
3606 * 1 - Error
3607 * 0 - Success
3772a991
JS
3608 **/
3609static inline int
3610lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3611{
3612 return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
3613}
3614
acd6859b
JS
3615/**
3616 * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3617 * using BlockGuard.
3618 * @phba: The Hba for which this call is being executed.
3619 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3620 *
3621 * This routine wraps the actual DMA mapping function pointer from the
3622 * lpfc_hba struct.
3623 *
3624 * Return codes:
3625 * 1 - Error
3626 * 0 - Success
3627 **/
3628static inline int
3629lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3630{
3631 return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
3632}
3633
ea2151b4 3634/**
3621a710 3635 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
ea2151b4
JS
3636 * @phba: Pointer to hba context object.
3637 * @vport: Pointer to vport object.
3638 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
3639 * @rsp_iocb: Pointer to response iocb object which reported error.
3640 *
3641 * This function posts an event when there is a SCSI command reporting
3642 * an error from the scsi device.
3643 **/
3644static void
3645lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
3646 struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
3647 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3648 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3649 uint32_t resp_info = fcprsp->rspStatus2;
3650 uint32_t scsi_status = fcprsp->rspStatus3;
3651 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
3652 struct lpfc_fast_path_event *fast_path_evt = NULL;
3653 struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
3654 unsigned long flags;
3655
5989b8d4
JS
3656 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3657 return;
3658
ea2151b4
JS
3659 /* If there is queuefull or busy condition send a scsi event */
3660 if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
3661 (cmnd->result == SAM_STAT_BUSY)) {
3662 fast_path_evt = lpfc_alloc_fast_evt(phba);
3663 if (!fast_path_evt)
3664 return;
3665 fast_path_evt->un.scsi_evt.event_type =
3666 FC_REG_SCSI_EVENT;
3667 fast_path_evt->un.scsi_evt.subcategory =
3668 (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
3669 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
3670 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
3671 memcpy(&fast_path_evt->un.scsi_evt.wwpn,
3672 &pnode->nlp_portname, sizeof(struct lpfc_name));
3673 memcpy(&fast_path_evt->un.scsi_evt.wwnn,
3674 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3675 } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
3676 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
3677 fast_path_evt = lpfc_alloc_fast_evt(phba);
3678 if (!fast_path_evt)
3679 return;
3680 fast_path_evt->un.check_cond_evt.scsi_event.event_type =
3681 FC_REG_SCSI_EVENT;
3682 fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
3683 LPFC_EVENT_CHECK_COND;
3684 fast_path_evt->un.check_cond_evt.scsi_event.lun =
3685 cmnd->device->lun;
3686 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
3687 &pnode->nlp_portname, sizeof(struct lpfc_name));
3688 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
3689 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3690 fast_path_evt->un.check_cond_evt.sense_key =
3691 cmnd->sense_buffer[2] & 0xf;
3692 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
3693 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
3694 } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3695 fcpi_parm &&
3696 ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
3697 ((scsi_status == SAM_STAT_GOOD) &&
3698 !(resp_info & (RESID_UNDER | RESID_OVER))))) {
3699 /*
3700 * If status is good or the resid does not match fcpi_parm while
3701 * fcpi_parm is valid, then there is a read_check error
3702 */
3703 fast_path_evt = lpfc_alloc_fast_evt(phba);
3704 if (!fast_path_evt)
3705 return;
3706 fast_path_evt->un.read_check_error.header.event_type =
3707 FC_REG_FABRIC_EVENT;
3708 fast_path_evt->un.read_check_error.header.subcategory =
3709 LPFC_EVENT_FCPRDCHKERR;
3710 memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
3711 &pnode->nlp_portname, sizeof(struct lpfc_name));
3712 memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
3713 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3714 fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
3715 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
3716 fast_path_evt->un.read_check_error.fcpiparam =
3717 fcpi_parm;
3718 } else
3719 return;
3720
3721 fast_path_evt->vport = vport;
3722 spin_lock_irqsave(&phba->hbalock, flags);
3723 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
3724 spin_unlock_irqrestore(&phba->hbalock, flags);
3725 lpfc_worker_wake_up(phba);
3726 return;
3727}
9bad7671
JS
3728
3729/**
f1126688 3730 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
3772a991 3731 * @phba: The HBA for which this call is being executed.
9bad7671
JS
3732 * @psb: The scsi buffer which is going to be un-mapped.
3733 *
3734 * This routine does DMA un-mapping of the scatter-gather list of the scsi
3772a991 3735 * command field of @psb for a device with SLI-3 interface spec.
9bad7671 3736 **/
bcf4dbfa 3737static void
f1126688 3738lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
bcf4dbfa
JS
3739{
3740 /*
3741 * There are only two special cases to consider. (1) the scsi command
3742 * requested scatter-gather usage or (2) the scsi command allocated
3743 * a request buffer, but did not request use_sg. There is a third
3744 * case, but it does not require resource deallocation.
3745 */
a0b4f78f
FT
3746 if (psb->seg_cnt > 0)
3747 scsi_dma_unmap(psb->pCmd);
e2a0a9d6
JS
3748 if (psb->prot_seg_cnt > 0)
3749 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
3750 scsi_prot_sg_count(psb->pCmd),
3751 psb->pCmd->sc_data_direction);
bcf4dbfa
JS
3752}
3753
9bad7671 3754/**
3621a710 3755 * lpfc_handle_fcp_err - FCP response handler
9bad7671
JS
3756 * @vport: The virtual port for which this call is being executed.
3757 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
3758 * @rsp_iocb: The response IOCB which contains FCP error.
3759 *
3760 * This routine is called to process response IOCB with status field
3761 * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
3762 * based upon SCSI and FCP error.
3763 **/
dea3101e 3764static void
2e0fef85
JS
3765lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
3766 struct lpfc_iocbq *rsp_iocb)
dea3101e 3767{
3768 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3769 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
3770 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
7054a606 3771 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
dea3101e 3772 uint32_t resp_info = fcprsp->rspStatus2;
3773 uint32_t scsi_status = fcprsp->rspStatus3;
c7743956 3774 uint32_t *lp;
dea3101e 3775 uint32_t host_status = DID_OK;
3776 uint32_t rsplen = 0;
c7743956 3777 uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
dea3101e 3778
ea2151b4 3779
dea3101e 3780 /*
3781 * If this is a task management command, there is no
3782 * scsi packet associated with this lpfc_cmd. The driver
3783 * consumes it.
3784 */
3785 if (fcpcmd->fcpCntl2) {
3786 scsi_status = 0;
3787 goto out;
3788 }
3789
6a9c52cf
JS
3790 if (resp_info & RSP_LEN_VALID) {
3791 rsplen = be32_to_cpu(fcprsp->rspRspLen);
e40a02c1 3792 if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
6a9c52cf
JS
3793 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3794 "2719 Invalid response length: "
3795 "tgt x%x lun x%x cmnd x%x rsplen x%x\n",
3796 cmnd->device->id,
3797 cmnd->device->lun, cmnd->cmnd[0],
3798 rsplen);
3799 host_status = DID_ERROR;
3800 goto out;
3801 }
e40a02c1
JS
3802 if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
3803 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3804 "2757 Protocol failure detected during "
3805 "processing of FCP I/O op: "
3806 "tgt x%x lun x%x cmnd x%x rspInfo3 x%x\n",
3807 cmnd->device->id,
3808 cmnd->device->lun, cmnd->cmnd[0],
3809 fcprsp->rspInfo3);
3810 host_status = DID_ERROR;
3811 goto out;
3812 }
6a9c52cf
JS
3813 }
3814
c7743956
JS
3815 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
3816 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
3817 if (snslen > SCSI_SENSE_BUFFERSIZE)
3818 snslen = SCSI_SENSE_BUFFERSIZE;
3819
3820 if (resp_info & RSP_LEN_VALID)
3821 rsplen = be32_to_cpu(fcprsp->rspRspLen);
3822 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
3823 }
3824 lp = (uint32_t *)cmnd->sense_buffer;
3825
aa1c7ee7
JS
3826 /* special handling for under run conditions */
3827 if (!scsi_status && (resp_info & RESID_UNDER)) {
3828 /* don't log under runs if fcp set... */
3829 if (vport->cfg_log_verbose & LOG_FCP)
3830 logit = LOG_FCP_ERROR;
3831 /* unless operator says so */
3832 if (vport->cfg_log_verbose & LOG_FCP_UNDER)
3833 logit = LOG_FCP_UNDER;
3834 }
c7743956 3835
e8b62011 3836 lpfc_printf_vlog(vport, KERN_WARNING, logit,
e2a0a9d6 3837 "9024 FCP command x%x failed: x%x SNS x%x x%x "
e8b62011
JS
3838 "Data: x%x x%x x%x x%x x%x\n",
3839 cmnd->cmnd[0], scsi_status,
3840 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
3841 be32_to_cpu(fcprsp->rspResId),
3842 be32_to_cpu(fcprsp->rspSnsLen),
3843 be32_to_cpu(fcprsp->rspRspLen),
3844 fcprsp->rspInfo3);
dea3101e 3845
a0b4f78f 3846 scsi_set_resid(cmnd, 0);
dea3101e 3847 if (resp_info & RESID_UNDER) {
a0b4f78f 3848 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
dea3101e 3849
73d91e50 3850 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
e2a0a9d6 3851 "9025 FCP Read Underrun, expected %d, "
e8b62011
JS
3852 "residual %d Data: x%x x%x x%x\n",
3853 be32_to_cpu(fcpcmd->fcpDl),
3854 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
3855 cmnd->underflow);
dea3101e 3856
7054a606
JS
3857 /*
3858 * If there is an underrun, check whether the underrun reported by the
3859 * storage array matches the underrun reported by the HBA.
3860 * If they do not match, a frame was dropped.
3861 */
3862 if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3863 fcpi_parm &&
a0b4f78f 3864 (scsi_get_resid(cmnd) != fcpi_parm)) {
e8b62011
JS
3865 lpfc_printf_vlog(vport, KERN_WARNING,
3866 LOG_FCP | LOG_FCP_ERROR,
e2a0a9d6 3867 "9026 FCP Read Check Error "
e8b62011
JS
3868 "and Underrun Data: x%x x%x x%x x%x\n",
3869 be32_to_cpu(fcpcmd->fcpDl),
3870 scsi_get_resid(cmnd), fcpi_parm,
3871 cmnd->cmnd[0]);
a0b4f78f 3872 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
7054a606
JS
3873 host_status = DID_ERROR;
3874 }
dea3101e 3875 /*
3876 * The cmnd->underflow is the minimum number of bytes that must
25985edc 3877 * be transferred for this command. Provided a sense condition
dea3101e 3878 * is not present, make sure the actual amount transferred is at
3879 * least the underflow value or fail.
3880 */
3881 if (!(resp_info & SNS_LEN_VALID) &&
3882 (scsi_status == SAM_STAT_GOOD) &&
a0b4f78f
FT
3883 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
3884 < cmnd->underflow)) {
e8b62011 3885 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
e2a0a9d6 3886 "9027 FCP command x%x residual "
e8b62011
JS
3887 "underrun converted to error "
3888 "Data: x%x x%x x%x\n",
66dbfbe6 3889 cmnd->cmnd[0], scsi_bufflen(cmnd),
e8b62011 3890 scsi_get_resid(cmnd), cmnd->underflow);
dea3101e 3891 host_status = DID_ERROR;
3892 }
3893 } else if (resp_info & RESID_OVER) {
e8b62011 3894 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
e2a0a9d6 3895 "9028 FCP command x%x residual overrun error. "
e4e74273 3896 "Data: x%x x%x\n", cmnd->cmnd[0],
e8b62011 3897 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
dea3101e 3898 host_status = DID_ERROR;
3899
3900 /*
3901 * Check SLI validation that all the transfer was actually done
582dd796 3902 * (fcpi_parm should be zero).
dea3101e 3903 */
582dd796 3904 } else if (fcpi_parm) {
e8b62011 3905 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
582dd796 3906 "9029 FCP Data Transfer Check Error: "
eee8877e 3907 "x%x x%x x%x x%x x%x\n",
e8b62011
JS
3908 be32_to_cpu(fcpcmd->fcpDl),
3909 be32_to_cpu(fcprsp->rspResId),
eee8877e
JS
3910 fcpi_parm, cmnd->cmnd[0], scsi_status);
3911 switch (scsi_status) {
3912 case SAM_STAT_GOOD:
3913 case SAM_STAT_CHECK_CONDITION:
3914 /* Fabric dropped a data frame. Fail any successful
3915 * command in which we detected dropped frames.
3916 * A status of good or some check conditions could
3917 * be considered a successful command.
3918 */
3919 host_status = DID_ERROR;
3920 break;
3921 }
a0b4f78f 3922 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
dea3101e 3923 }
3924
3925 out:
3926 cmnd->result = ScsiResult(host_status, scsi_status);
ea2151b4 3927 lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
dea3101e 3928}
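/*
 * Underflow example (illustrative): a READ with cmnd->underflow = 512
 * that completes SAM_STAT_GOOD with no sense data but a residual
 * leaving fewer than 512 bytes transferred is converted to DID_ERROR
 * by the check above, even though the target reported success.
 */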
3929
9bad7671 3930/**
3621a710 3931 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
9bad7671
JS
3932 * @phba: The Hba for which this call is being executed.
3933 * @pIocbIn: The command IOCBQ for the scsi cmnd.
3772a991 3934 * @pIocbOut: The response IOCBQ for the scsi cmnd.
9bad7671
JS
3935 *
3936 * This routine assigns scsi command result by looking into response IOCB
3937 * status field appropriately. This routine handles QUEUE FULL condition as
3938 * well by ramping down device queue depth.
3939 **/
dea3101e 3940static void
3941lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
3942 struct lpfc_iocbq *pIocbOut)
3943{
3944 struct lpfc_scsi_buf *lpfc_cmd =
3945 (struct lpfc_scsi_buf *) pIocbIn->context1;
2e0fef85 3946 struct lpfc_vport *vport = pIocbIn->vport;
dea3101e 3947 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
3948 struct lpfc_nodelist *pnode = rdata->pnode;
75baf696 3949 struct scsi_cmnd *cmd;
445cf4f4 3950 int result;
a257bf90 3951 struct scsi_device *tmp_sdev;
5ffc266e 3952 int depth;
fa61a54e 3953 unsigned long flags;
ea2151b4 3954 struct lpfc_fast_path_event *fast_path_evt;
75baf696 3955 struct Scsi_Host *shost;
a257bf90 3956 uint32_t queue_depth, scsi_id;
73d91e50 3957 uint32_t logit = LOG_FCP;
dea3101e 3958
75baf696
JS
3959 /* Sanity check on return of outstanding command */
3960 if (!(lpfc_cmd->pCmd))
3961 return;
3962 cmd = lpfc_cmd->pCmd;
3963 shost = cmd->device->host;
3964
e3d2b802 3965 lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
dea3101e 3966 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
341af102
JS
3967 /* pick up SLI4 exchange busy status from HBA */
3968 lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
3969
9a6b09c0
JS
3970#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
3971 if (lpfc_cmd->prot_data_type) {
3972 struct scsi_dif_tuple *src = NULL;
3973
3974 src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
3975 /*
3976 * Used to restore any changes to protection
3977 * data for error injection.
3978 */
3979 switch (lpfc_cmd->prot_data_type) {
3980 case LPFC_INJERR_REFTAG:
3981 src->ref_tag =
3982 lpfc_cmd->prot_data;
3983 break;
3984 case LPFC_INJERR_APPTAG:
3985 src->app_tag =
3986 (uint16_t)lpfc_cmd->prot_data;
3987 break;
3988 case LPFC_INJERR_GUARD:
3989 src->guard_tag =
3990 (uint16_t)lpfc_cmd->prot_data;
3991 break;
3992 default:
3993 break;
3994 }
3995
3996 lpfc_cmd->prot_data = 0;
3997 lpfc_cmd->prot_data_type = 0;
3998 lpfc_cmd->prot_data_segment = NULL;
3999 }
4000#endif
109f6ed0
JS
4001 if (pnode && NLP_CHK_NODE_ACT(pnode))
4002 atomic_dec(&pnode->cmd_pending);
dea3101e 4003
4004 if (lpfc_cmd->status) {
4005 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
4006 (lpfc_cmd->result & IOERR_DRVR_MASK))
4007 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4008 else if (lpfc_cmd->status >= IOSTAT_CNT)
4009 lpfc_cmd->status = IOSTAT_DEFAULT;
aa1c7ee7
JS
4010 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
4011 !lpfc_cmd->fcp_rsp->rspStatus3 &&
4012 (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
4013 !(vport->cfg_log_verbose & LOG_FCP_UNDER))
73d91e50
JS
4014 logit = 0;
4015 else
4016 logit = LOG_FCP | LOG_FCP_UNDER;
4017 lpfc_printf_vlog(vport, KERN_WARNING, logit,
4018 "9030 FCP cmd x%x failed <%d/%d> "
5a0d80fc
JS
4019 "status: x%x result: x%x "
4020 "sid: x%x did: x%x oxid: x%x "
4021 "Data: x%x x%x\n",
73d91e50
JS
4022 cmd->cmnd[0],
4023 cmd->device ? cmd->device->id : 0xffff,
4024 cmd->device ? cmd->device->lun : 0xffff,
4025 lpfc_cmd->status, lpfc_cmd->result,
5a0d80fc
JS
4026 vport->fc_myDID, pnode->nlp_DID,
4027 phba->sli_rev == LPFC_SLI_REV4 ?
4028 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
73d91e50
JS
4029 pIocbOut->iocb.ulpContext,
4030 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
dea3101e 4031
4032 switch (lpfc_cmd->status) {
4033 case IOSTAT_FCP_RSP_ERROR:
4034 /* Call FCP RSP handler to determine result */
2e0fef85 4035 lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
dea3101e 4036 break;
4037 case IOSTAT_NPORT_BSY:
4038 case IOSTAT_FABRIC_BSY:
0f1f53a7 4039 cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
ea2151b4
JS
4040 fast_path_evt = lpfc_alloc_fast_evt(phba);
4041 if (!fast_path_evt)
4042 break;
4043 fast_path_evt->un.fabric_evt.event_type =
4044 FC_REG_FABRIC_EVENT;
4045 fast_path_evt->un.fabric_evt.subcategory =
4046 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
4047 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
4048 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
4049 memcpy(&fast_path_evt->un.fabric_evt.wwpn,
4050 &pnode->nlp_portname,
4051 sizeof(struct lpfc_name));
4052 memcpy(&fast_path_evt->un.fabric_evt.wwnn,
4053 &pnode->nlp_nodename,
4054 sizeof(struct lpfc_name));
4055 }
4056 fast_path_evt->vport = vport;
4057 fast_path_evt->work_evt.evt =
4058 LPFC_EVT_FASTPATH_MGMT_EVT;
4059 spin_lock_irqsave(&phba->hbalock, flags);
4060 list_add_tail(&fast_path_evt->work_evt.evt_listp,
4061 &phba->work_list);
4062 spin_unlock_irqrestore(&phba->hbalock, flags);
4063 lpfc_worker_wake_up(phba);
dea3101e 4064 break;
92d7f7b0 4065 case IOSTAT_LOCAL_REJECT:
1151e3ec 4066 case IOSTAT_REMOTE_STOP:
ab56dc2e
JS
4067 if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
4068 lpfc_cmd->result ==
4069 IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
4070 lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
4071 lpfc_cmd->result ==
4072 IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
4073 cmd->result = ScsiResult(DID_NO_CONNECT, 0);
4074 break;
4075 }
d7c255b2 4076 if (lpfc_cmd->result == IOERR_INVALID_RPI ||
92d7f7b0 4077 lpfc_cmd->result == IOERR_NO_RESOURCES ||
b92938b4
JS
4078 lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
4079 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
92d7f7b0 4080 cmd->result = ScsiResult(DID_REQUEUE, 0);
58da1ffb 4081 break;
e2a0a9d6 4082 }
e2a0a9d6
JS
4083 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
4084 lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
4085 pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
4086 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
4087 /*
4088 * This is a response for a BG enabled
4089 * cmd. Parse BG error
4090 */
4091 lpfc_parse_bg_err(phba, lpfc_cmd,
4092 pIocbOut);
4093 break;
4094 } else {
4095 lpfc_printf_vlog(vport, KERN_WARNING,
4096 LOG_BG,
4097 "9031 non-zero BGSTAT "
6a9c52cf 4098 "on unprotected cmd\n");
e2a0a9d6
JS
4099 }
4100 }
1151e3ec
JS
4101 if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
4102 && (phba->sli_rev == LPFC_SLI_REV4)
4103 && (pnode && NLP_CHK_NODE_ACT(pnode))) {
4104 /* This IO was aborted by the target; we don't
4105 * know the rxid, and because we did not send the
4106 * ABTS we cannot generate an RRQ.
4107 */
4108 lpfc_set_rrq_active(phba, pnode,
ee0f4fe1
JS
4109 lpfc_cmd->cur_iocbq.sli4_lxritag,
4110 0, 0);
1151e3ec 4111 }
e2a0a9d6 4112 /* else: fall through */
dea3101e 4113 default:
4114 cmd->result = ScsiResult(DID_ERROR, 0);
4115 break;
4116 }
4117
58da1ffb 4118 if (!pnode || !NLP_CHK_NODE_ACT(pnode)
19a7b4ae 4119 || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
0f1f53a7
JS
4120 cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
4121 SAM_STAT_BUSY);
ab56dc2e 4122 } else
dea3101e 4123 cmd->result = ScsiResult(DID_OK, 0);
dea3101e 4124
4125 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
4126 uint32_t *lp = (uint32_t *)cmd->sense_buffer;
4127
e8b62011
JS
4128 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4129 "0710 Iodone <%d/%d> cmd %p, error "
4130 "x%x SNS x%x x%x Data: x%x x%x\n",
4131 cmd->device->id, cmd->device->lun, cmd,
4132 cmd->result, *lp, *(lp + 3), cmd->retries,
4133 scsi_get_resid(cmd));
dea3101e 4134 }
4135
ea2151b4 4136 lpfc_update_stats(phba, lpfc_cmd);
445cf4f4 4137 result = cmd->result;
977b5a0a
JS
4138 if (vport->cfg_max_scsicmpl_time &&
4139 time_after(jiffies, lpfc_cmd->start_time +
4140 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
a257bf90 4141 spin_lock_irqsave(shost->host_lock, flags);
109f6ed0
JS
4142 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
4143 if (pnode->cmd_qdepth >
4144 atomic_read(&pnode->cmd_pending) &&
4145 (atomic_read(&pnode->cmd_pending) >
4146 LPFC_MIN_TGT_QDEPTH) &&
4147 ((cmd->cmnd[0] == READ_10) ||
4148 (cmd->cmnd[0] == WRITE_10)))
4149 pnode->cmd_qdepth =
4150 atomic_read(&pnode->cmd_pending);
4151
4152 pnode->last_change_time = jiffies;
4153 }
a257bf90 4154 spin_unlock_irqrestore(shost->host_lock, flags);
109f6ed0 4155 } else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
7dc517df 4156 if ((pnode->cmd_qdepth < vport->cfg_tgt_queue_depth) &&
977b5a0a 4157 time_after(jiffies, pnode->last_change_time +
109f6ed0 4158 msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
a257bf90 4159 spin_lock_irqsave(shost->host_lock, flags);
7dc517df
JS
4160 depth = pnode->cmd_qdepth * LPFC_TGTQ_RAMPUP_PCENT
4161 / 100;
4162 depth = depth ? depth : 1;
4163 pnode->cmd_qdepth += depth;
4164 if (pnode->cmd_qdepth > vport->cfg_tgt_queue_depth)
4165 pnode->cmd_qdepth = vport->cfg_tgt_queue_depth;
109f6ed0 4166 pnode->last_change_time = jiffies;
a257bf90 4167 spin_unlock_irqrestore(shost->host_lock, flags);
109f6ed0 4168 }
977b5a0a
JS
4169 }
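 /*
 * Ramp-up example (illustrative; assumes LPFC_TGTQ_RAMPUP_PCENT
 * is a small percentage such as 5): a cmd_qdepth of 40 grows by
 * max(40 * 5 / 100, 1) = 2 every LPFC_TGTQ_INTERVAL until capped
 * at vport->cfg_tgt_queue_depth.
 */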
4170
1dcb58e5 4171 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
a257bf90
JS
4172
4173 /* The sdev is not guaranteed to be valid post scsi_done upcall. */
4174 queue_depth = cmd->device->queue_depth;
4175 scsi_id = cmd->device->id;
0bd4ca25
JSEC
4176 cmd->scsi_done(cmd);
4177
b808608b 4178 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
876dd7d0 4179 spin_lock_irqsave(&phba->hbalock, flags);
92e3af66 4180 lpfc_cmd->pCmd = NULL;
876dd7d0 4181 spin_unlock_irqrestore(&phba->hbalock, flags);
92e3af66 4182
fa61a54e
JS
4183 /*
4184 * If there is a thread waiting for command completion
4185 * wake up the thread.
4186 */
a257bf90 4187 spin_lock_irqsave(shost->host_lock, flags);
fa61a54e
JS
4188 if (lpfc_cmd->waitq)
4189 wake_up(lpfc_cmd->waitq);
a257bf90 4190 spin_unlock_irqrestore(shost->host_lock, flags);
b808608b
JW
4191 lpfc_release_scsi_buf(phba, lpfc_cmd);
4192 return;
4193 }
4194
92d7f7b0 4195 if (!result)
a257bf90 4196 lpfc_rampup_queue_depth(vport, queue_depth);
92d7f7b0 4197
445cf4f4
JSEC
4198 /*
4199 * Check for queue full. If the lun is reporting queue full, then
4200 * back off the lun queue depth to prevent target overloads.
4201 */
58da1ffb
JS
4202 if (result == SAM_STAT_TASK_SET_FULL && pnode &&
4203 NLP_CHK_NODE_ACT(pnode)) {
a257bf90
JS
4204 shost_for_each_device(tmp_sdev, shost) {
4205 if (tmp_sdev->id != scsi_id)
445cf4f4
JSEC
4206 continue;
4207 depth = scsi_track_queue_full(tmp_sdev,
5ffc266e
JS
4208 tmp_sdev->queue_depth-1);
4209 if (depth <= 0)
4210 continue;
e8b62011
JS
4211 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4212 "0711 detected queue full - lun queue "
4213 "depth adjusted to %d.\n", depth);
ea2151b4 4214 lpfc_send_sdev_queuedepth_change_event(phba, vport,
5ffc266e
JS
4215 pnode,
4216 tmp_sdev->lun,
4217 depth+1, depth);
445cf4f4
JSEC
4218 }
4219 }
4220
876dd7d0 4221 spin_lock_irqsave(&phba->hbalock, flags);
92e3af66 4222 lpfc_cmd->pCmd = NULL;
876dd7d0 4223 spin_unlock_irqrestore(&phba->hbalock, flags);
92e3af66 4224
fa61a54e
JS
4225 /*
4226 * If there is a thread waiting for command completion
4227 * wake up the thread.
4228 */
a257bf90 4229 spin_lock_irqsave(shost->host_lock, flags);
fa61a54e
JS
4230 if (lpfc_cmd->waitq)
4231 wake_up(lpfc_cmd->waitq);
a257bf90 4232 spin_unlock_irqrestore(shost->host_lock, flags);
fa61a54e 4233
0bd4ca25 4234 lpfc_release_scsi_buf(phba, lpfc_cmd);
dea3101e 4235}
4236
34b02dcd 4237/**
3621a710 4238 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
34b02dcd
JS
4239 * @data: A pointer to the immediate command data portion of the IOCB.
4240 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
4241 *
4242 * The routine copies the entire FCP command from @fcp_cmnd to @data while
4243 * byte swapping the data to big endian format for transmission on the wire.
4244 **/
4245static void
4246lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
4247{
4248 int i, j;
4249 for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
4250 i += sizeof(uint32_t), j++) {
4251 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
4252 }
4253}
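/*
 * Byte-order sketch (illustrative): on a little-endian host the loop
 * above swaps each 32-bit word, so a word holding 0x01020304 in host
 * order goes out as the byte sequence 01 02 03 04; on big-endian
 * hosts cpu_to_be32() is the identity and this is a plain word copy.
 */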
4254
9bad7671 4255/**
f1126688 4256 * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
9bad7671
JS
4257 * @vport: The virtual port for which this call is being executed.
4258 * @lpfc_cmd: The scsi command which needs to send.
4259 * @pnode: Pointer to lpfc_nodelist.
4260 *
4261 * This routine initializes fcp_cmnd and iocb data structure from scsi command
3772a991 4262 * to transfer for device with SLI3 interface spec.
9bad7671 4263 **/
dea3101e 4264static void
f1126688 4265lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2e0fef85 4266 struct lpfc_nodelist *pnode)
dea3101e 4267{
2e0fef85 4268 struct lpfc_hba *phba = vport->phba;
dea3101e 4269 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4270 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4271 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
4272 struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
4273 int datadir = scsi_cmnd->sc_data_direction;
7e2b19fb 4274 char tag[2];
027140ea
JS
4275 uint8_t *ptr;
4276 bool sli4;
dea3101e 4277
58da1ffb
JS
4278 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
4279 return;
4280
dea3101e 4281 lpfc_cmd->fcp_rsp->rspSnsLen = 0;
69859dc4
JSEC
4282 /* clear task management bits */
4283 lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
dea3101e 4284
91886523
JSEC
4285 int_to_scsilun(lpfc_cmd->pCmd->device->lun,
4286 &lpfc_cmd->fcp_cmnd->fcp_lun);
dea3101e 4287
027140ea
JS
4288 ptr = &fcp_cmnd->fcpCdb[0];
4289 memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
4290 if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
4291 ptr += scsi_cmnd->cmd_len;
4292 memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
4293 }
4294
7e2b19fb
JS
4295 if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
4296 switch (tag[0]) {
dea3101e 4297 case HEAD_OF_QUEUE_TAG:
4298 fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
4299 break;
4300 case ORDERED_QUEUE_TAG:
4301 fcp_cmnd->fcpCntl1 = ORDERED_Q;
4302 break;
4303 default:
4304 fcp_cmnd->fcpCntl1 = SIMPLE_Q;
4305 break;
4306 }
4307 } else
fe8f7f9c 4308 fcp_cmnd->fcpCntl1 = SIMPLE_Q;
dea3101e 4309
027140ea
JS
4310 sli4 = (phba->sli_rev == LPFC_SLI_REV4);
4311
dea3101e 4312 /*
4313 * There are three possibilities here - use scatter-gather segment, use
4314 * the single mapping, or neither. Start the lpfc command prep by
4315 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
4316 * data bde entry.
4317 */
a0b4f78f 4318 if (scsi_sg_count(scsi_cmnd)) {
dea3101e 4319 if (datadir == DMA_TO_DEVICE) {
4320 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
027140ea
JS
4321 if (sli4)
4322 iocb_cmd->ulpPU = PARM_READ_CHECK;
4323 else {
3772a991
JS
4324 iocb_cmd->un.fcpi.fcpi_parm = 0;
4325 iocb_cmd->ulpPU = 0;
027140ea 4326 }
dea3101e 4327 fcp_cmnd->fcpCntl3 = WRITE_DATA;
4328 phba->fc4OutputRequests++;
4329 } else {
4330 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
4331 iocb_cmd->ulpPU = PARM_READ_CHECK;
dea3101e 4332 fcp_cmnd->fcpCntl3 = READ_DATA;
4333 phba->fc4InputRequests++;
4334 }
4335 } else {
4336 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
4337 iocb_cmd->un.fcpi.fcpi_parm = 0;
4338 iocb_cmd->ulpPU = 0;
4339 fcp_cmnd->fcpCntl3 = 0;
4340 phba->fc4ControlRequests++;
4341 }
e2a0a9d6
JS
4342 if (phba->sli_rev == 3 &&
4343 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
34b02dcd 4344 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
dea3101e 4345 /*
4346 * Finish initializing those IOCB fields that are independent
4347 * of the scsi_cmnd request_buffer
4348 */
4349 piocbq->iocb.ulpContext = pnode->nlp_rpi;
027140ea 4350 if (sli4)
6d368e53
JS
4351 piocbq->iocb.ulpContext =
4352 phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
dea3101e 4353 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
4354 piocbq->iocb.ulpFCP2Rcvy = 1;
09372820
JS
4355 else
4356 piocbq->iocb.ulpFCP2Rcvy = 0;
dea3101e 4357
4358 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
4359 piocbq->context1 = lpfc_cmd;
4360 piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4361 piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
2e0fef85 4362 piocbq->vport = vport;
dea3101e 4363}
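/*
 * Command-word summary for the routine above (illustrative):
 *
 *	write with data		CMD_FCP_IWRITE64_CR, fcpCntl3 = WRITE_DATA
 *	read with data		CMD_FCP_IREAD64_CR,  fcpCntl3 = READ_DATA
 *	no data movement	CMD_FCP_ICMND64_CR,  fcpCntl3 = 0
 *
 * On SLI-4, ulpPU is set to PARM_READ_CHECK for writes as well as
 * reads; on SLI-3 it is left zero for writes.
 */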
4364
da0436e9 4365/**
6d368e53 4366 * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
9bad7671
JS
4367 * @vport: The virtual port for which this call is being executed.
4368 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
4369 * @lun: Logical unit number.
4370 * @task_mgmt_cmd: SCSI task management command.
4371 *
3772a991
JS
4372 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
4373 * for device with SLI-3 interface spec.
9bad7671
JS
4374 *
4375 * Return codes:
4376 * 0 - Error
4377 * 1 - Success
4378 **/
dea3101e 4379static int
f1126688 4380lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
dea3101e 4381 struct lpfc_scsi_buf *lpfc_cmd,
420b630d 4382 unsigned int lun,
dea3101e 4383 uint8_t task_mgmt_cmd)
4384{
dea3101e 4385 struct lpfc_iocbq *piocbq;
4386 IOCB_t *piocb;
4387 struct fcp_cmnd *fcp_cmnd;
0b18ac42 4388 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
dea3101e 4389 struct lpfc_nodelist *ndlp = rdata->pnode;
4390
58da1ffb
JS
4391 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
4392 ndlp->nlp_state != NLP_STE_MAPPED_NODE)
dea3101e 4393 return 0;
dea3101e 4394
dea3101e 4395 piocbq = &(lpfc_cmd->cur_iocbq);
2e0fef85
JS
4396 piocbq->vport = vport;
4397
dea3101e 4398 piocb = &piocbq->iocb;
4399
4400 fcp_cmnd = lpfc_cmd->fcp_cmnd;
34b02dcd
JS
4401 /* Clear out any old data in the FCP command area */
4402 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
4403 int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
dea3101e 4404 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
e2a0a9d6
JS
4405 if (vport->phba->sli_rev == 3 &&
4406 !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
34b02dcd 4407 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
dea3101e 4408 piocb->ulpCommand = CMD_FCP_ICMND64_CR;
dea3101e 4409 piocb->ulpContext = ndlp->nlp_rpi;
6d368e53
JS
4410 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
4411 piocb->ulpContext =
4412 vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
4413 }
dea3101e 4414 if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
4415 piocb->ulpFCP2Rcvy = 1;
4416 }
4417 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
4418
4419 /* ulpTimeout is only one byte */
4420 if (lpfc_cmd->timeout > 0xff) {
4421 /*
4422 * Do not timeout the command at the firmware level.
4423 * The driver will provide the timeout mechanism.
4424 */
4425 piocb->ulpTimeout = 0;
f1126688 4426 } else
dea3101e 4427 piocb->ulpTimeout = lpfc_cmd->timeout;
da0436e9 4428
f1126688
JS
4429 if (vport->phba->sli_rev == LPFC_SLI_REV4)
4430 lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
3772a991 4431
f1126688 4432 return 1;
3772a991
JS
4433}
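/*
 * TMF sketch (illustrative): for an FCP_LUN_RESET to LUN 5 the
 * routine above zeroes the fcp_cmnd, encodes the LUN with
 * int_to_scsilun(5, &fcp_cmnd->fcp_lun), and sets
 * fcpCntl2 = FCP_LUN_RESET; no CDB or data is carried, so the
 * IOCB command is CMD_FCP_ICMND64_CR.
 */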
4434
4435/**
25985edc 4436 * lpfc_scsi_api_table_setup - Set up scsi api function jump table
3772a991
JS
4437 * @phba: The hba struct for which this call is being executed.
4438 * @dev_grp: The HBA PCI-Device group number.
4439 *
4440 * This routine sets up the SCSI interface API function jump table in @phba
4441 * struct.
4442 * Returns: 0 - success, -ENODEV - failure.
4443 **/
4444int
4445lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4446{
4447
f1126688
JS
4448 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
4449 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
f1126688 4450
3772a991
JS
4451 switch (dev_grp) {
4452 case LPFC_PCI_DEV_LP:
4453 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
4454 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
acd6859b 4455 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
3772a991 4456 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
19ca7609 4457 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
3772a991 4458 break;
da0436e9
JS
4459 case LPFC_PCI_DEV_OC:
4460 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
4461 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
acd6859b 4462 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
da0436e9 4463 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
19ca7609 4464 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
da0436e9 4465 break;
3772a991
JS
4466 default:
4467 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4468 "1418 Invalid HBA PCI-device group: 0x%x\n",
4469 dev_grp);
4470 return -ENODEV;
4471 break;
4472 }
3772a991 4473 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
84d1b006 4474 phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
3772a991
JS
4475 return 0;
4476}
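/*
 * Dispatch sketch (illustrative): once the table is populated,
 * callers stay interface-agnostic through the wrappers defined
 * earlier, e.g.
 *
 *	err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
 *
 * resolves to lpfc_scsi_prep_dma_buf_s3() on an LPFC_PCI_DEV_LP HBA
 * and to lpfc_scsi_prep_dma_buf_s4() on an LPFC_PCI_DEV_OC HBA.
 */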
4477
9bad7671 4478/**
3621a710 4479 * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
9bad7671
JS
4480 * @phba: The Hba for which this call is being executed.
4481 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
4482 * @rspiocbq: Pointer to lpfc_iocbq data structure.
4483 *
4484 * This routine is the IOCB completion routine for the device reset and
4485 * target reset routines. It releases the scsi buffer associated with lpfc_cmd.
4486 **/
7054a606
JS
4487static void
4488lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
4489 struct lpfc_iocbq *cmdiocbq,
4490 struct lpfc_iocbq *rspiocbq)
4491{
4492 struct lpfc_scsi_buf *lpfc_cmd =
4493 (struct lpfc_scsi_buf *) cmdiocbq->context1;
4494 if (lpfc_cmd)
4495 lpfc_release_scsi_buf(phba, lpfc_cmd);
4496 return;
4497}
4498
9bad7671 4499/**
3621a710 4500 * lpfc_info - Info entry point of scsi_host_template data structure
9bad7671
JS
4501 * @host: The scsi host for which this call is being executed.
4502 *
4503 * This routine provides module information about hba.
4504 *
4505 * Return code:
4506 * Pointer to char - Success.
4507 **/
dea3101e 4508const char *
4509lpfc_info(struct Scsi_Host *host)
4510{
2e0fef85
JS
4511 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
4512 struct lpfc_hba *phba = vport->phba;
8b68cd52 4513 int len, link_speed = 0;
dea3101e 4514 static char lpfcinfobuf[384];
4515
4516 memset(lpfcinfobuf, 0, 384);
4517 if (phba && phba->pcidev){
4518 strncpy(lpfcinfobuf, phba->ModelDesc, 256);
4519 len = strlen(lpfcinfobuf);
4520 snprintf(lpfcinfobuf + len,
4521 384-len,
4522 " on PCI bus %02x device %02x irq %d",
4523 phba->pcidev->bus->number,
4524 phba->pcidev->devfn,
4525 phba->pcidev->irq);
4526 len = strlen(lpfcinfobuf);
4527 if (phba->Port[0]) {
4528 snprintf(lpfcinfobuf + len,
4529 384-len,
4530 " port %s",
4531 phba->Port);
4532 }
65467b6b 4533 len = strlen(lpfcinfobuf);
8b68cd52
JS
4534 if (phba->sli_rev <= LPFC_SLI_REV3) {
4535 link_speed = lpfc_sli_port_speed_get(phba);
4536 } else {
4537 if (phba->sli4_hba.link_state.logical_speed)
4538 link_speed =
4539 phba->sli4_hba.link_state.logical_speed;
4540 else
4541 link_speed = phba->sli4_hba.link_state.speed;
65467b6b 4542 }
8b68cd52
JS
4543 if (link_speed != 0)
4544 snprintf(lpfcinfobuf + len, 384-len,
4545 " Logical Link Speed: %d Mbps", link_speed);
dea3101e 4546 }
4547 return lpfcinfobuf;
4548}
4549
9bad7671 4550/**
3621a710 4551 * lpfc_poll_rearm_time - Routine to modify fcp_poll timer of hba
9bad7671
JS
4552 * @phba: The Hba for which this call is being executed.
4553 *
4554 * This routine modifies fcp_poll_timer field of @phba by cfg_poll_tmo.
4555 * The default value of cfg_poll_tmo is 10 milliseconds.
4556 **/
875fbdfe
JSEC
4557static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
4558{
4559 unsigned long poll_tmo_expires =
4560 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
4561
0e9bb8d7 4562 if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
875fbdfe
JSEC
4563 mod_timer(&phba->fcp_poll_timer,
4564 poll_tmo_expires);
4565}
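/*
 * Timing example (illustrative): with the default cfg_poll_tmo of 10
 * and HZ = 250, msecs_to_jiffies(10) rounds up to 3 ticks (~12 ms);
 * note the timer is only rearmed while the FCP ring txcmplq still
 * holds outstanding commands.
 */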
4566
9bad7671 4567/**
3621a710 4568 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
9bad7671
JS
4569 * @phba: The Hba for which this call is being executed.
4570 *
4571 * This routine starts the fcp_poll_timer of @phba.
4572 **/
875fbdfe
JSEC
4573void lpfc_poll_start_timer(struct lpfc_hba * phba)
4574{
4575 lpfc_poll_rearm_timer(phba);
4576}
4577
9bad7671 4578/**
3621a710 4579 * lpfc_poll_timeout - Restart polling timer
9bad7671
JS
4580 * @ptr: Map to lpfc_hba data structure pointer.
4581 *
4582 * This routine restarts the fcp_poll timer when FCP ring polling is enabled
4583 * and the FCP ring interrupt is disabled.
4584 **/
4585
875fbdfe
JSEC
4586void lpfc_poll_timeout(unsigned long ptr)
4587{
2e0fef85 4588 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
875fbdfe
JSEC
4589
4590 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
45ed1190
JS
4591 lpfc_sli_handle_fast_ring_event(phba,
4592 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
4593
875fbdfe
JSEC
4594 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4595 lpfc_poll_rearm_timer(phba);
4596 }
875fbdfe
JSEC
4597}
4598
9bad7671 4599/**
3621a710 4600 * lpfc_queuecommand - scsi_host_template queuecommand entry point
9bad7671
JS
4601 * @shost: Pointer to the Scsi_Host to which the command is queued.
4602 * @cmnd: Pointer to scsi_cmnd data structure.
4603 *
4604 * The driver registers this routine with the scsi midlayer to submit a @cmnd
4605 * for processing. It prepares an IOCB from the scsi command and hands it to
4606 * the firmware; cmnd->scsi_done() is invoked when processing completes.
4607 *
4608 * Return value :
4609 * 0 - Success
4610 * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
4611 **/
dea3101e 4612static int
b9a7c631 4613lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
dea3101e 4614{
2e0fef85
JS
4615 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4616 struct lpfc_hba *phba = vport->phba;
dea3101e 4617 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
1c6f4ef5 4618 struct lpfc_nodelist *ndlp;
0bd4ca25 4619 struct lpfc_scsi_buf *lpfc_cmd;
19a7b4ae 4620 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
19a7b4ae 4621 int err;
dea3101e 4622
19a7b4ae
JSEC
4623 err = fc_remote_port_chkready(rport);
4624 if (err) {
4625 cmnd->result = err;
dea3101e 4626 goto out_fail_command;
4627 }
1c6f4ef5 4628 ndlp = rdata->pnode;
dea3101e 4629
bf08611b 4630 if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
acd6859b 4631 (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {
e2a0a9d6 4632
6a9c52cf
JS
4633 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4634 "9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
4635 " op:%02x str=%s without registering for"
4636 " BlockGuard - Rejecting command\n",
e2a0a9d6
JS
4637 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
4638 dif_op_str[scsi_get_prot_op(cmnd)]);
4639 goto out_fail_command;
4640 }
4641
dea3101e 4642 /*
19a7b4ae
JSEC
4643 * Catch race where our node has transitioned, but the
4644 * transport is still transitioning.
dea3101e 4645 */
6b415f5d
JS
4646 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
4647 goto out_tgt_busy;
7dc517df 4648 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
3496343d 4649 goto out_tgt_busy;
a93ce024 4650
19ca7609 4651 lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
dea3101e 4652 if (lpfc_cmd == NULL) {
eaf15d5b 4653 lpfc_rampdown_queue_depth(phba);
92d7f7b0 4654
e8b62011
JS
4655 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4656 "0707 driver's buffer pool is empty, "
4657 "IO busied\n");
dea3101e 4658 goto out_host_busy;
4659 }
4660
4661 /*
4662 * Store the midlayer's command structure for the completion phase
4663 * and complete the command initialization.
4664 */
4665 lpfc_cmd->pCmd = cmnd;
4666 lpfc_cmd->rdata = rdata;
4667 lpfc_cmd->timeout = 0;
977b5a0a 4668 lpfc_cmd->start_time = jiffies;
dea3101e 4669 cmnd->host_scribble = (unsigned char *)lpfc_cmd;
dea3101e 4670
e2a0a9d6 4671 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
6a9c52cf 4672 if (vport->phba->cfg_enable_bg) {
737d4248
JS
4673 lpfc_printf_vlog(vport,
4674 KERN_INFO, LOG_SCSI_CMD,
2613470a
JS
4675 "9033 BLKGRD: rcvd %s cmd:x%x "
4676 "sector x%llx cnt %u pt %x\n",
4677 dif_op_str[scsi_get_prot_op(cmnd)],
4678 cmnd->cmnd[0],
4679 (unsigned long long)scsi_get_lba(cmnd),
4680 blk_rq_sectors(cmnd->request),
4681 (cmnd->cmnd[1]>>5));
6a9c52cf 4682 }
e2a0a9d6
JS
4683 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
4684 } else {
6a9c52cf 4685 if (vport->phba->cfg_enable_bg) {
737d4248
JS
4686 lpfc_printf_vlog(vport,
4687 KERN_INFO, LOG_SCSI_CMD,
2613470a
JS
4688 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
4689 "x%x sector x%llx cnt %u pt %x\n",
4690 cmnd->cmnd[0],
4691 (unsigned long long)scsi_get_lba(cmnd),
9a6b09c0 4692 blk_rq_sectors(cmnd->request),
2613470a 4693 (cmnd->cmnd[1]>>5));
6a9c52cf 4694 }
e2a0a9d6
JS
4695 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
4696 }
4697
dea3101e 4698 if (err)
4699 goto out_host_busy_free_buf;
4700
2e0fef85 4701 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
dea3101e 4702
977b5a0a 4703 atomic_inc(&ndlp->cmd_pending);
3772a991 4704 err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
92d7f7b0 4705 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
eaf15d5b
JS
4706 if (err) {
4707 atomic_dec(&ndlp->cmd_pending);
dea3101e 4708 goto out_host_busy_free_buf;
eaf15d5b 4709 }
875fbdfe 4710 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
45ed1190
JS
4711 lpfc_sli_handle_fast_ring_event(phba,
4712 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
4713
875fbdfe
JSEC
4714 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4715 lpfc_poll_rearm_timer(phba);
4716 }
4717
dea3101e 4718 return 0;
4719
4720 out_host_busy_free_buf:
bcf4dbfa 4721 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
0bd4ca25 4722 lpfc_release_scsi_buf(phba, lpfc_cmd);
dea3101e 4723 out_host_busy:
4724 return SCSI_MLQUEUE_HOST_BUSY;
4725
3496343d
MC
4726 out_tgt_busy:
4727 return SCSI_MLQUEUE_TARGET_BUSY;
4728
dea3101e 4729 out_fail_command:
b9a7c631 4730 cmnd->scsi_done(cmnd);
dea3101e 4731 return 0;
4732}
4733
f281233d 4734
9bad7671 4735/**
3621a710 4736 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
9bad7671
JS
4737 * @cmnd: Pointer to scsi_cmnd data structure.
4738 *
4739 * This routine aborts @cmnd pending in base driver.
4740 *
4741 * Return code :
4742 * 0x2003 - Error
4743 * 0x2002 - Success
4744 **/
dea3101e 4745static int
63c59c3b 4746lpfc_abort_handler(struct scsi_cmnd *cmnd)
dea3101e 4747{
2e0fef85
JS
4748 struct Scsi_Host *shost = cmnd->device->host;
4749 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4750 struct lpfc_hba *phba = vport->phba;
0bd4ca25
JSEC
4751 struct lpfc_iocbq *iocb;
4752 struct lpfc_iocbq *abtsiocb;
dea3101e 4753 struct lpfc_scsi_buf *lpfc_cmd;
dea3101e 4754 IOCB_t *cmd, *icmd;
3a70730a 4755 int ret = SUCCESS, status = 0;
876dd7d0 4756 unsigned long flags;
fa61a54e 4757 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
dea3101e 4758
3a70730a 4759 status = fc_block_scsi_eh(cmnd);
908e18e4 4760 if (status != 0 && status != SUCCESS)
3a70730a 4761 return status;
4f2e66c6 4762
876dd7d0 4763 spin_lock_irqsave(&phba->hbalock, flags);
4f2e66c6
JS
4764 /* driver queued commands are in the process of being flushed */
4765 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
876dd7d0 4766 spin_unlock_irqrestore(&phba->hbalock, flags);
4f2e66c6
JS
4767 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4768 "3168 SCSI Layer abort requested I/O has been "
4769 "flushed by LLD.\n");
4770 return FAILED;
4771 }
4772
0bd4ca25 4773 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
92e3af66 4774 if (!lpfc_cmd || !lpfc_cmd->pCmd) {
876dd7d0 4775 spin_unlock_irqrestore(&phba->hbalock, flags);
eee8877e
JS
4776 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4777 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
5cd049a5 4778 "x%x ID %d LUN %d\n",
3a70730a 4779 SUCCESS, cmnd->device->id, cmnd->device->lun);
eee8877e
JS
4780 return SUCCESS;
4781 }
dea3101e 4782
4f2e66c6
JS
4783 iocb = &lpfc_cmd->cur_iocbq;
4784 /* the command is in the process of being cancelled */
4785 if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
876dd7d0 4786 spin_unlock_irqrestore(&phba->hbalock, flags);
4f2e66c6
JS
4787 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4788 "3169 SCSI Layer abort requested I/O has been "
4789 "cancelled by LLD.\n");
4790 return FAILED;
4791 }
0bd4ca25
JSEC
4792 /*
4793 * If pCmd field of the corresponding lpfc_scsi_buf structure
4794 * points to a different SCSI command, then the driver has
4795 * already completed this command, but the midlayer did not
4f2e66c6 4796 * see the completion before the eh fired. Just return SUCCESS.
0bd4ca25 4797 */
4f2e66c6
JS
4798 if (lpfc_cmd->pCmd != cmnd) {
4799 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4800 "3170 SCSI Layer abort requested I/O has been "
4801 "completed by LLD.\n");
4802 goto out_unlock;
4803 }
dea3101e 4804
0bd4ca25 4805 BUG_ON(iocb->context1 != lpfc_cmd);
dea3101e 4806
4f2e66c6 4807 abtsiocb = __lpfc_sli_get_iocbq(phba);
0bd4ca25
JSEC
4808 if (abtsiocb == NULL) {
4809 ret = FAILED;
4f2e66c6 4810 goto out_unlock;
dea3101e 4811 }
4812
dea3101e 4813 /*
0bd4ca25
JSEC
4814 * The scsi command can not be in txq and it is in flight because the
4815 * pCmd is still pointing at the SCSI command we have to abort. There
4816 * is no need to search the txcmplq. Just send an abort to the FW.
dea3101e 4817 */
dea3101e 4818
	cmd = &iocb->iocb;
	icmd = &abtsiocb->iocb;
	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
	icmd->un.acxri.abortContextTag = cmd->ulpContext;
	if (phba->sli_rev == LPFC_SLI_REV4)
		icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
	else
		icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

	icmd->ulpLe = 1;
	icmd->ulpClass = cmd->ulpClass;

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
	abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;

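	/*
	 * With the link up an ABTS can reach the target, so a real abort
	 * (CMD_ABORT_XRI_CN) is requested; with the link down the exchange
	 * can only be closed locally (CMD_CLOSE_XRI_CN).
	 */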
	if (lpfc_is_link_up(phba))
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	else
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
	abtsiocb->vport = vport;
	/* no longer need the lock after this point */
	spin_unlock_irqrestore(&phba->hbalock, flags);

	if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
	    IOCB_ERROR) {
		lpfc_sli_release_iocbq(phba, abtsiocb);
		ret = FAILED;
		goto out;
	}

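	/*
	 * In polled mode the abort completion will not arrive by itself,
	 * so process the FCP ring once here to reap pending events.
	 */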
	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);

	lpfc_cmd->waitq = &waitq;
	/* Wait for the abort to complete: the completion path clears pCmd
	 * and wakes the wait queue; bound the wait by twice devloss_tmo.
	 */
	wait_event_timeout(waitq,
			   (lpfc_cmd->pCmd != cmnd),
			   msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
	lpfc_cmd->waitq = NULL;

	if (lpfc_cmd->pCmd == cmnd) {
		ret = FAILED;
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0748 abort handler timed out waiting "
				 "for aborting I/O (xri:x%x) to complete: "
				 "ret %#x, ID %d, LUN %d\n",
				 iocb->sli4_xritag, ret,
				 cmnd->device->id, cmnd->device->lun);
	}
	goto out;

out_unlock:
	spin_unlock_irqrestore(&phba->hbalock, flags);
out:
	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
			 "LUN %d\n", ret, cmnd->device->id,
			 cmnd->device->lun);
	return ret;
}

static char *
lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
{
	switch (task_mgmt_cmd) {
	case FCP_ABORT_TASK_SET:
		return "ABORT_TASK_SET";
	case FCP_CLEAR_TASK_SET:
		return "FCP_CLEAR_TASK_SET";
	case FCP_BUS_RESET:
		return "FCP_BUS_RESET";
	case FCP_LUN_RESET:
		return "FCP_LUN_RESET";
	case FCP_TARGET_RESET:
		return "FCP_TARGET_RESET";
	case FCP_CLEAR_ACA:
		return "FCP_CLEAR_ACA";
	case FCP_TERMINATE_TASK:
		return "FCP_TERMINATE_TASK";
	default:
		return "unknown";
	}
}

/**
 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
 * @vport: The virtual port for which this call is being executed.
 * @rdata: Pointer to remote port local data
 * @tgt_id: Target ID of remote device.
 * @lun_id: LUN number for the TMF
 * @task_mgmt_cmd: type of TMF to send
 *
 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
 * a remote port.
 *
 * Return Code:
 * 0x2003 - Error
 * 0x2002 - Success.
 **/
static int
lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
		   unsigned tgt_id, unsigned int lun_id,
		   uint8_t task_mgmt_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp;
	struct lpfc_nodelist *pnode = rdata->pnode;
	int ret;
	int status;

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return FAILED;

	lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode);
	if (lpfc_cmd == NULL)
		return FAILED;
	lpfc_cmd->timeout = 60;
	lpfc_cmd->rdata = rdata;

	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
					      task_mgmt_cmd);
	if (!status) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}

	iocbq = &lpfc_cmd->cur_iocbq;
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (iocbqrsp == NULL) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0702 Issue %s to TGT %d LUN %d "
			 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
			 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
			 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
			 iocbq->iocb_flag);

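	/*
	 * Issue the TMF synchronously and wait up to lpfc_cmd->timeout
	 * seconds for the response before evaluating the result.
	 */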
	status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
					  iocbq, iocbqrsp, lpfc_cmd->timeout);
	if (status != IOCB_SUCCESS) {
		if (status == IOCB_TIMEDOUT) {
			iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
			ret = TIMEOUT_ERROR;
		} else
			ret = FAILED;
		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0727 TMF %s to TGT %d LUN %d failed (%d, %d) "
				 "iocb_flag x%x\n",
				 lpfc_taskmgmt_name(task_mgmt_cmd),
				 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
				 iocbqrsp->iocb.un.ulpWord[4],
				 iocbq->iocb_flag);
	} else if (status == IOCB_BUSY)
		ret = FAILED;
	else
		ret = SUCCESS;

	lpfc_sli_release_iocbq(phba, iocbqrsp);

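	/*
	 * On a timeout the command buffer is deliberately not released
	 * here; lpfc_tskmgmt_def_cmpl frees it when the late completion
	 * eventually arrives.
	 */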
	if (ret != TIMEOUT_ERROR)
		lpfc_release_scsi_buf(phba, lpfc_cmd);

	return ret;
}

/**
 * lpfc_chk_tgt_mapped - Verify the scsi target (rport) is mapped
 * @vport: The virtual port to check on
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine delays until the scsi target (aka rport) for the
 * command exists (is present and logged in) or we declare it non-existent.
 *
 * Return code :
 * 0x2003 - Error
 * 0x2002 - Success
 **/
static int
lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
{
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode;
	unsigned long later;

	if (!rdata) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			"0797 Tgt Map rport failure: rdata x%p\n", rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	/*
	 * If target is not in a MAPPED state, delay until
	 * target is rediscovered or devloss timeout expires.
	 */
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies)) {
		if (!pnode || !NLP_CHK_NODE_ACT(pnode))
			return FAILED;
		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
			return SUCCESS;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
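		/*
		 * Re-read the rport data on every pass; rediscovery can
		 * tear down and replace the hostdata while we sleep.
		 */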
		rdata = cmnd->device->hostdata;
		if (!rdata)
			return FAILED;
		pnode = rdata->pnode;
	}
	if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
	    (pnode->nlp_state != NLP_STE_MAPPED_NODE))
		return FAILED;
	return SUCCESS;
}

/**
 * lpfc_reset_flush_io_context - Flush I/O contexts after a reset TMF
 * @vport: The virtual port (scsi_host) for the flush context
 * @tgt_id: If aborting by Target context - specifies the target id
 * @lun_id: If aborting by Lun context - specifies the lun id
 * @context: specifies the context level to flush at.
 *
 * After a reset condition via TMF, we need to flush orphaned i/o
 * contexts from the adapter. This routine aborts any contexts
 * outstanding, then waits for their completions. The wait is
 * bounded by devloss_tmo though.
 *
 * Return code :
 * 0x2003 - Error
 * 0x2002 - Success
 **/
static int
lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
			    uint64_t lun_id, lpfc_ctx_cmd context)
{
	struct lpfc_hba *phba = vport->phba;
	unsigned long later;
	int cnt;

	cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
	if (cnt)
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    tgt_id, lun_id, context);
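	/*
	 * Poll for the aborted I/O to drain, re-counting every 20 ms,
	 * for at most twice the devloss timeout.
	 */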
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies) && cnt) {
		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
		cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
	}
	if (cnt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0724 I/O flush failure for context %s : cnt x%x\n",
			((context == LPFC_CTX_LUN) ? "LUN" :
			 ((context == LPFC_CTX_TGT) ? "TGT" :
			  ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
			cnt);
		return FAILED;
	}
	return SUCCESS;
}

/**
 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a device reset by sending a LUN_RESET task management
 * command.
 *
 * Return code :
 * 0x2003 - Error
 * 0x2002 - Success
 **/
static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode;
	unsigned tgt_id = cmnd->device->id;
	unsigned int lun_id = cmnd->device->lun;
	struct lpfc_scsi_event_header scsi_event;
	int status, ret = SUCCESS;

	if (!rdata) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0798 Device Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	status = lpfc_chk_tgt_mapped(vport, cmnd);
	if (status == FAILED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0721 Device Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}

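	/*
	 * Post a vendor-unique FC event so user space can observe the
	 * LUN reset before the TMF is actually issued.
	 */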
	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_LUNRESET;
	scsi_event.lun = lun_id;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
				    FCP_LUN_RESET);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0713 SCSI layer issued Device Reset (%d, %d) "
			 "return x%x\n", tgt_id, lun_id, status);

	/*
	 * We have to clean up the i/o: it may be orphaned by the TMF, or,
	 * if the TMF failed, left in an indeterminate state.
	 * So, continue on.
	 * We will report success if all the i/o aborts successfully.
	 */
	ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
					  LPFC_CTX_LUN);
	return ret;
}

/**
 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a target reset by sending a TARGET_RESET task management
 * command.
 *
 * Return code :
 * 0x2003 - Error
 * 0x2002 - Success
 **/
static int
lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode;
	unsigned tgt_id = cmnd->device->id;
	unsigned int lun_id = cmnd->device->lun;
	struct lpfc_scsi_event_header scsi_event;
	int status, ret = SUCCESS;

	if (!rdata) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0799 Target Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	status = lpfc_chk_tgt_mapped(vport, cmnd);
	if (status == FAILED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0722 Target Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_TGTRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
				    FCP_TARGET_RESET);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0723 SCSI layer issued Target Reset (%d, %d) "
			 "return x%x\n", tgt_id, lun_id, status);

	/*
	 * We have to clean up the i/o: it may be orphaned by the TMF, or,
	 * if the TMF failed, left in an indeterminate state.
	 * So, continue on.
	 * We will report success if all the i/o aborts successfully.
	 */
	ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
					  LPFC_CTX_TGT);
	return ret;
}

/**
 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a target reset to all targets on @cmnd->device->host.
 * This emulates Parallel SCSI Bus Reset Semantics.
 *
 * Return code :
 * 0x2003 - Error
 * 0x2002 - Success
 **/
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_nodelist *ndlp = NULL;
	struct lpfc_scsi_event_header scsi_event;
	int match;
	int ret = SUCCESS, status, i;

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver. Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search for mapped node by target ID */
		match = 0;
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
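			/*
			 * When cfg_fcp2_no_tgt_reset is set, FCP-2 capable
			 * devices (typically tape) are excluded from the
			 * bus-wide target reset.
			 */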
			if (vport->phba->cfg_fcp2_no_tgt_reset &&
			    (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
				continue;
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
			    ndlp->nlp_sid == i &&
			    ndlp->rport) {
				match = 1;
				break;
			}
		}
		spin_unlock_irq(shost->host_lock);
		if (!match)
			continue;

		status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
					    i, 0, FCP_TARGET_RESET);

		if (status != SUCCESS) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				"0700 Bus Reset on target %d failed\n",
				i);
			ret = FAILED;
		}
	}
	/*
	 * We have to clean up the i/o: it may be orphaned by the TMFs
	 * above, or, if any of the TMFs failed, left in an
	 * indeterminate state.
	 * We will report success if all the i/o aborts successfully.
	 */
	status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
	if (status != SUCCESS)
		ret = FAILED;

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
	return ret;
}

/**
 * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a host reset on the adapter port. It brings the HBA
 * offline, performs a board restart, and then brings the board back online.
 * The lpfc_offline call invokes lpfc_sli_hba_down, which aborts and locally
 * rejects all outstanding SCSI commands to the host, with the errors
 * returned back to the SCSI mid-level. As this is the SCSI mid-level's
 * last resort in error handling, it returns error only if resetting the
 * adapter was not successful; in all other cases it returns success.
 *
 * Return code :
 * 0x2003 - Error
 * 0x2002 - Success
 **/
static int
lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int rc, ret = SUCCESS;

	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	rc = lpfc_sli_brdrestart(phba);
	if (rc)
		ret = FAILED;
	lpfc_online(phba);
	lpfc_unblock_mgmt_io(phba);

	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
		"3172 SCSI layer issued Host Reset Data: x%x\n", ret);
	return ret;
}

/**
 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
 * @sdev: Pointer to scsi_device.
 *
 * This routine populates cmds_per_lun + 2 scsi_bufs into this host's
 * globally available list of scsi buffers, making sure no more scsi
 * buffers are allocated than the HBA limit conveyed to the midlayer.
 * This list of scsi buffers exists for the lifetime of the driver.
 *
 * Return codes:
 * non-0 - Error
 * 0 - Success
 **/
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0;
	uint32_t num_to_alloc = 0;
	int num_allocated = 0;
	uint32_t sdev_cnt;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = rport->dd_data;
	sdev_cnt = atomic_inc_return(&phba->sdev_cnt);

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 * available list of scsi buffers. Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure. The
	 * formula accounts for the lun_queue_depth + error handlers + 1
	 * extra. This list of scsi bufs exists for the lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = vport->cfg_lun_queue_depth + 2;

	/* If allocated buffers are enough, do nothing */
	if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
		return 0;

	/* Allow some exchanges to be available always to complete discovery */
	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0704 At limitation of %d preallocated "
				 "command buffers\n", total);
		return 0;
	/* Allow some exchanges to be available always to complete discovery */
	} else if (total + num_to_alloc >
		phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0705 Allocation request of %d "
				 "command buffers will exceed max of %d. "
				 "Reducing allocation request to %d.\n",
				 num_to_alloc, phba->cfg_hba_queue_depth,
				 (phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}
	num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
	if (num_to_alloc != num_allocated) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0708 Allocation request of %d "
				 "command buffers did not succeed. "
				 "Allocated %d buffers.\n",
				 num_to_alloc, num_allocated);
	}
	if (num_allocated > 0)
		phba->total_scsi_bufs += num_allocated;
	return 0;
}

/**
 * lpfc_slave_configure - scsi_host_template slave_configure entry point
 * @sdev: Pointer to scsi_device.
 *
 * This routine configures the following items
 * - Tag command queuing support for @sdev if supported.
 * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
 *
 * Return codes:
 * 0 - Success
 **/
static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;

	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
	else
		scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;
}

/**
 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
 * @sdev: Pointer to scsi_device.
 *
 * This routine sets the @sdev hostdata field to null.
 **/
static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	atomic_dec(&phba->sdev_cnt);
	sdev->hostdata = NULL;
	return;
}

struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.eh_host_reset_handler  = lpfc_host_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
	.vendor_id		= LPFC_NL_VENDOR_ID,
	.change_queue_depth	= lpfc_change_queue_depth,
	.change_queue_type	= lpfc_change_queue_type,
};
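
/*
 * The vport template mirrors lpfc_template but omits eh_host_reset_handler
 * and vendor_id; a full adapter reset applies to the physical port rather
 * than to an individual virtual port.
 */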
struct scsi_host_template lpfc_vport_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_vport_attrs,
	.max_sectors		= 0xFFFF,
	.change_queue_depth	= lpfc_change_queue_depth,
	.change_queue_type	= lpfc_change_queue_type,
};