[SCSI] lpfc 8.3.6 : Fix critical errors
drivers/scsi/lpfc/lpfc_scsi.c
1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
21 #include <linux/pci.h>
22 #include <linux/interrupt.h>
23 #include <linux/delay.h>
24 #include <asm/unaligned.h>
25
26 #include <scsi/scsi.h>
27 #include <scsi/scsi_device.h>
28 #include <scsi/scsi_eh.h>
29 #include <scsi/scsi_host.h>
30 #include <scsi/scsi_tcq.h>
31 #include <scsi/scsi_transport_fc.h>
32
33 #include "lpfc_version.h"
34 #include "lpfc_hw4.h"
35 #include "lpfc_hw.h"
36 #include "lpfc_sli.h"
37 #include "lpfc_sli4.h"
38 #include "lpfc_nl.h"
39 #include "lpfc_disc.h"
40 #include "lpfc_scsi.h"
41 #include "lpfc.h"
42 #include "lpfc_logmsg.h"
43 #include "lpfc_crtn.h"
44 #include "lpfc_vport.h"
45
46 #define LPFC_RESET_WAIT 2
47 #define LPFC_ABORT_WAIT 2
48
49 int _dump_buf_done;
50
51 static char *dif_op_str[] = {
52 "SCSI_PROT_NORMAL",
53 "SCSI_PROT_READ_INSERT",
54 "SCSI_PROT_WRITE_STRIP",
55 "SCSI_PROT_READ_STRIP",
56 "SCSI_PROT_WRITE_INSERT",
57 "SCSI_PROT_READ_PASS",
58 "SCSI_PROT_WRITE_PASS",
59 };
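/* Strings for the SCSI_PROT_* protection operations; indexed by scsi_get_prot_op(). */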
60 static void
61 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
62 static void
63 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
64
65 static void
66 lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
67 {
68 void *src, *dst;
69 struct scatterlist *sgde = scsi_sglist(cmnd);
70
71 if (!_dump_buf_data) {
72 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
73 "9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
74 __func__);
75 return;
76 }
77
78
79 if (!sgde) {
80 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
81 "9051 BLKGRD: ERROR: data scatterlist is null\n");
82 return;
83 }
84
85 dst = (void *) _dump_buf_data;
86 while (sgde) {
87 src = sg_virt(sgde);
88 memcpy(dst, src, sgde->length);
89 dst += sgde->length;
90 sgde = sg_next(sgde);
91 }
92 }
93
94 static void
95 lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
96 {
97 void *src, *dst;
98 struct scatterlist *sgde = scsi_prot_sglist(cmnd);
99
100 if (!_dump_buf_dif) {
101 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
102 "9052 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
103 __func__);
104 return;
105 }
106
107 if (!sgde) {
108 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
109 "9053 BLKGRD: ERROR: prot scatterlist is null\n");
110 return;
111 }
112
113 dst = _dump_buf_dif;
114 while (sgde) {
115 src = sg_virt(sgde);
116 memcpy(dst, src, sgde->length);
117 dst += sgde->length;
118 sgde = sg_next(sgde);
119 }
120 }
121
122 /**
123 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
124 * @phba: Pointer to HBA object.
125 * @lpfc_cmd: lpfc scsi command object pointer.
126 *
127 * This function is called from the lpfc_prep_task_mgmt_cmd function to
128 * set the last bit in the response sge entry.
129 **/
130 static void
131 lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
132 struct lpfc_scsi_buf *lpfc_cmd)
133 {
134 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
135 if (sgl) {
136 sgl += 1;
137 sgl->word2 = le32_to_cpu(sgl->word2);
138 bf_set(lpfc_sli4_sge_last, sgl, 1);
139 sgl->word2 = cpu_to_le32(sgl->word2);
140 }
141 }
142
143 /**
144 * lpfc_update_stats - Update statistical data for the command completion
145 * @phba: Pointer to HBA object.
146 * @lpfc_cmd: lpfc scsi command object pointer.
147 *
148 * This function is called when there is a command completion and this
149 * function updates the statistical data for the command completion.
150 **/
151 static void
152 lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
153 {
154 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
155 struct lpfc_nodelist *pnode = rdata->pnode;
156 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
157 unsigned long flags;
158 struct Scsi_Host *shost = cmd->device->host;
159 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
160 unsigned long latency;
161 int i;
162
163 if (cmd->result)
164 return;
165
166 latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
167
168 spin_lock_irqsave(shost->host_lock, flags);
169 if (!vport->stat_data_enabled ||
170 vport->stat_data_blocked ||
171 !pnode->lat_data ||
172 (phba->bucket_type == LPFC_NO_BUCKET)) {
173 spin_unlock_irqrestore(shost->host_lock, flags);
174 return;
175 }
176
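/*
 * Map the completion latency into a histogram bucket. For
 * LPFC_LINEAR_BUCKET, bucket i collects latencies in the range
 * (bucket_base + (i - 1) * bucket_step, bucket_base + i * bucket_step];
 * otherwise bucket i is the first one with latency no larger than
 * bucket_base + (1 << i) * bucket_step. As a hypothetical example, with
 * bucket_base 0 and bucket_step 10 ms, a 25 ms completion lands in
 * linear bucket 3.
 */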
177 if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
178 i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
179 phba->bucket_step;
180 /* check array subscript bounds */
181 if (i < 0)
182 i = 0;
183 else if (i >= LPFC_MAX_BUCKET_COUNT)
184 i = LPFC_MAX_BUCKET_COUNT - 1;
185 } else {
186 for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
187 if (latency <= (phba->bucket_base +
188 ((1<<i)*phba->bucket_step)))
189 break;
190 }
191
192 pnode->lat_data[i].cmd_count++;
193 spin_unlock_irqrestore(shost->host_lock, flags);
194 }
195
196 /**
197 * lpfc_send_sdev_queuedepth_change_event - Posts a queuedepth change event
198 * @phba: Pointer to HBA context object.
199 * @vport: Pointer to vport object.
200 * @ndlp: Pointer to FC node associated with the target.
201 * @lun: Lun number of the scsi device.
202 * @old_val: Old value of the queue depth.
203 * @new_val: New value of the queue depth.
204 *
205 * This function sends an event to the mgmt application indicating
206 * there is a change in the scsi device queue depth.
207 **/
208 static void
209 lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
210 struct lpfc_vport *vport,
211 struct lpfc_nodelist *ndlp,
212 uint32_t lun,
213 uint32_t old_val,
214 uint32_t new_val)
215 {
216 struct lpfc_fast_path_event *fast_path_evt;
217 unsigned long flags;
218
219 fast_path_evt = lpfc_alloc_fast_evt(phba);
220 if (!fast_path_evt)
221 return;
222
223 fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
224 FC_REG_SCSI_EVENT;
225 fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
226 LPFC_EVENT_VARQUEDEPTH;
227
228 /* Report all luns with change in queue depth */
229 fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
230 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
231 memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
232 &ndlp->nlp_portname, sizeof(struct lpfc_name));
233 memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
234 &ndlp->nlp_nodename, sizeof(struct lpfc_name));
235 }
236
237 fast_path_evt->un.queue_depth_evt.oldval = old_val;
238 fast_path_evt->un.queue_depth_evt.newval = new_val;
239 fast_path_evt->vport = vport;
240
241 fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
242 spin_lock_irqsave(&phba->hbalock, flags);
243 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
244 spin_unlock_irqrestore(&phba->hbalock, flags);
245 lpfc_worker_wake_up(phba);
246
247 return;
248 }
249
250 /**
251 * lpfc_change_queue_depth - Alter scsi device queue depth
252 * @sdev: Pointer to the scsi device on which to change the queue depth.
253 * @qdepth: New queue depth to set the sdev to.
254 * @reason: The reason for the queue depth change.
255 *
256 * This function is called by the midlayer and the LLD to alter the queue
257 * depth for a scsi device. This function sets the queue depth to the new
258 * value and sends an event out to log the queue depth change.
259 **/
260 int
261 lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
262 {
263 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
264 struct lpfc_hba *phba = vport->phba;
265 struct lpfc_rport_data *rdata;
266 unsigned long new_queue_depth, old_queue_depth;
267
268 old_queue_depth = sdev->queue_depth;
269 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
270 new_queue_depth = sdev->queue_depth;
271 rdata = sdev->hostdata;
272 if (rdata)
273 lpfc_send_sdev_queuedepth_change_event(phba, vport,
274 rdata->pnode, sdev->lun,
275 old_queue_depth,
276 new_queue_depth);
277 return sdev->queue_depth;
278 }
279
280 /**
281 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
282 * @phba: The Hba for which this call is being executed.
283 *
284 * This routine is called when there is a resource error in the driver or firmware.
285 * This routine posts a WORKER_RAMP_DOWN_QUEUE event for @phba. This routine
286 * posts at most 1 event each second. This routine wakes up the worker thread of
287 * @phba to process the WORKER_RAMP_DOWN_QUEUE event.
288 *
289 * This routine should be called with no lock held.
290 **/
291 void
292 lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
293 {
294 unsigned long flags;
295 uint32_t evt_posted;
296
297 spin_lock_irqsave(&phba->hbalock, flags);
298 atomic_inc(&phba->num_rsrc_err);
299 phba->last_rsrc_error_time = jiffies;
300
301 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
302 spin_unlock_irqrestore(&phba->hbalock, flags);
303 return;
304 }
305
306 phba->last_ramp_down_time = jiffies;
307
308 spin_unlock_irqrestore(&phba->hbalock, flags);
309
310 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
311 evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
312 if (!evt_posted)
313 phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
314 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
315
316 if (!evt_posted)
317 lpfc_worker_wake_up(phba);
318 return;
319 }
320
321 /**
322 * lpfc_rampup_queue_depth - Post RAMP_UP_QUEUE event for worker thread
323 * @vport: The virtual port for which this call is being executed.
324 * @queue_depth: The current queue depth of the scsi device for the completed command.
325 * This routine posts a WORKER_RAMP_UP_QUEUE event for the @vport's hba. This
326 * routine posts at most 1 event every 5 minutes after last_ramp_up_time or
327 * last_rsrc_error_time. This routine wakes up the worker thread of the hba
328 * to process the WORKER_RAMP_UP_QUEUE event.
329 *
330 * This routine should be called with no lock held.
331 **/
332 static inline void
333 lpfc_rampup_queue_depth(struct lpfc_vport *vport,
334 uint32_t queue_depth)
335 {
336 unsigned long flags;
337 struct lpfc_hba *phba = vport->phba;
338 uint32_t evt_posted;
339 atomic_inc(&phba->num_cmd_success);
340
341 if (vport->cfg_lun_queue_depth <= queue_depth)
342 return;
343 spin_lock_irqsave(&phba->hbalock, flags);
344 if (time_before(jiffies,
345 phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) ||
346 time_before(jiffies,
347 phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL)) {
348 spin_unlock_irqrestore(&phba->hbalock, flags);
349 return;
350 }
351 phba->last_ramp_up_time = jiffies;
352 spin_unlock_irqrestore(&phba->hbalock, flags);
353
354 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
355 evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
356 if (!evt_posted)
357 phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
358 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
359
360 if (!evt_posted)
361 lpfc_worker_wake_up(phba);
362 return;
363 }
364
365 /**
366 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
367 * @phba: The Hba for which this call is being executed.
368 *
369 * This routine is called to process WORKER_RAMP_DOWN_QUEUE event for worker
370 * thread. This routine reduces the queue depth for all scsi devices on each vport
371 * associated with @phba.
372 **/
373 void
374 lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
375 {
376 struct lpfc_vport **vports;
377 struct Scsi_Host *shost;
378 struct scsi_device *sdev;
379 unsigned long new_queue_depth;
380 unsigned long num_rsrc_err, num_cmd_success;
381 int i;
382
383 num_rsrc_err = atomic_read(&phba->num_rsrc_err);
384 num_cmd_success = atomic_read(&phba->num_cmd_success);
385
386 vports = lpfc_create_vport_work_array(phba);
387 if (vports != NULL)
388 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
389 shost = lpfc_shost_from_vport(vports[i]);
390 shost_for_each_device(sdev, shost) {
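/*
 * Scale the queue depth down in proportion to the error rate
 * observed since the last adjustment, and always by at least one.
 * As a hypothetical example, if 1 of every 4 commands saw a
 * resource error, the depth is reduced by roughly 25%.
 */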
391 new_queue_depth =
392 sdev->queue_depth * num_rsrc_err /
393 (num_rsrc_err + num_cmd_success);
394 if (!new_queue_depth)
395 new_queue_depth = sdev->queue_depth - 1;
396 else
397 new_queue_depth = sdev->queue_depth -
398 new_queue_depth;
399 lpfc_change_queue_depth(sdev, new_queue_depth,
400 SCSI_QDEPTH_DEFAULT);
401 }
402 }
403 lpfc_destroy_vport_work_array(phba, vports);
404 atomic_set(&phba->num_rsrc_err, 0);
405 atomic_set(&phba->num_cmd_success, 0);
406 }
407
408 /**
409 * lpfc_ramp_up_queue_handler - WORKER_RAMP_UP_QUEUE event handler
410 * @phba: The Hba for which this call is being executed.
411 *
412 * This routine is called to process WORKER_RAMP_UP_QUEUE event for worker
413 * thread. This routine increases the queue depth for all scsi devices on each vport
414 * associated with @phba by 1. This routine also sets @phba num_rsrc_err and
415 * num_cmd_success to zero.
416 **/
417 void
418 lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
419 {
420 struct lpfc_vport **vports;
421 struct Scsi_Host *shost;
422 struct scsi_device *sdev;
423 int i;
424
425 vports = lpfc_create_vport_work_array(phba);
426 if (vports != NULL)
427 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
428 shost = lpfc_shost_from_vport(vports[i]);
429 shost_for_each_device(sdev, shost) {
430 if (vports[i]->cfg_lun_queue_depth <=
431 sdev->queue_depth)
432 continue;
433 lpfc_change_queue_depth(sdev,
434 sdev->queue_depth+1,
435 SCSI_QDEPTH_RAMP_UP);
436 }
437 }
438 lpfc_destroy_vport_work_array(phba, vports);
439 atomic_set(&phba->num_rsrc_err, 0);
440 atomic_set(&phba->num_cmd_success, 0);
441 }
442
443 /**
444 * lpfc_scsi_dev_block - set all scsi hosts to block state
445 * @phba: Pointer to HBA context object.
446 *
447 * This function walks the vport list and sets each SCSI host to block state
448 * by invoking fc_remote_port_delete() routine. This function is invoked
449 * with EEH when device's PCI slot has been permanently disabled.
450 **/
451 void
452 lpfc_scsi_dev_block(struct lpfc_hba *phba)
453 {
454 struct lpfc_vport **vports;
455 struct Scsi_Host *shost;
456 struct scsi_device *sdev;
457 struct fc_rport *rport;
458 int i;
459
460 vports = lpfc_create_vport_work_array(phba);
461 if (vports != NULL)
462 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
463 shost = lpfc_shost_from_vport(vports[i]);
464 shost_for_each_device(sdev, shost) {
465 rport = starget_to_rport(scsi_target(sdev));
466 fc_remote_port_delete(rport);
467 }
468 }
469 lpfc_destroy_vport_work_array(phba, vports);
470 }
471
472 /**
473 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
474 * @vport: The virtual port for which this call being executed.
475 * @num_to_alloc: The requested number of buffers to allocate.
476 *
477 * This routine allocates a scsi buffer for device with SLI-3 interface spec,
478 * the scsi buffer contains all the necessary information needed to initiate
479 * a SCSI I/O. The non-DMAable buffer region contains information to build
480 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
481 * and the initial BPL. In addition to allocating memory, the FCP CMND and
482 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
483 *
484 * Return codes:
485 * int - number of scsi buffers that were allocated.
486 * 0 = failure, less than num_to_alloc is a partial failure.
487 **/
488 static int
489 lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
490 {
491 struct lpfc_hba *phba = vport->phba;
492 struct lpfc_scsi_buf *psb;
493 struct ulp_bde64 *bpl;
494 IOCB_t *iocb;
495 dma_addr_t pdma_phys_fcp_cmd;
496 dma_addr_t pdma_phys_fcp_rsp;
497 dma_addr_t pdma_phys_bpl;
498 uint16_t iotag;
499 int bcnt;
500
501 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
502 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
503 if (!psb)
504 break;
505
506 /*
507 * Get memory from the pci pool to map the virt space to pci
508 * bus space for an I/O. The DMA buffer includes space for the
509 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
510 * necessary to support the sg_tablesize.
511 */
512 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
513 GFP_KERNEL, &psb->dma_handle);
514 if (!psb->data) {
515 kfree(psb);
516 break;
517 }
518
519 /* Initialize virtual ptrs to dma_buf region. */
520 memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
521
522 /* Allocate iotag for psb->cur_iocbq. */
523 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
524 if (iotag == 0) {
525 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
526 psb->data, psb->dma_handle);
527 kfree(psb);
528 break;
529 }
530 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
531
532 psb->fcp_cmnd = psb->data;
533 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
534 psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
535 sizeof(struct fcp_rsp);
536
537 /* Initialize local short-hand pointers. */
538 bpl = psb->fcp_bpl;
539 pdma_phys_fcp_cmd = psb->dma_handle;
540 pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
541 pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
542 sizeof(struct fcp_rsp);
543
544 /*
545 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
546 * are sg list bdes. Initialize the first two and leave the
547 * rest for queuecommand.
548 */
549 bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
550 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
551 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
552 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
553 bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
554
555 /* Setup the physical region for the FCP RSP */
556 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
557 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
558 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
559 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
560 bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
561
562 /*
563 * Since the IOCB for the FCP I/O is built into this
564 * lpfc_scsi_buf, initialize it with all known data now.
565 */
566 iocb = &psb->cur_iocbq.iocb;
567 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
568 if ((phba->sli_rev == 3) &&
569 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
570 /* fill in immediate fcp command BDE */
571 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
572 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
573 iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
574 unsli3.fcp_ext.icd);
575 iocb->un.fcpi64.bdl.addrHigh = 0;
576 iocb->ulpBdeCount = 0;
577 iocb->ulpLe = 0;
578 /* fill in response BDE */
579 iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
580 BUFF_TYPE_BDE_64;
581 iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
582 sizeof(struct fcp_rsp);
583 iocb->unsli3.fcp_ext.rbde.addrLow =
584 putPaddrLow(pdma_phys_fcp_rsp);
585 iocb->unsli3.fcp_ext.rbde.addrHigh =
586 putPaddrHigh(pdma_phys_fcp_rsp);
587 } else {
588 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
589 iocb->un.fcpi64.bdl.bdeSize =
590 (2 * sizeof(struct ulp_bde64));
591 iocb->un.fcpi64.bdl.addrLow =
592 putPaddrLow(pdma_phys_bpl);
593 iocb->un.fcpi64.bdl.addrHigh =
594 putPaddrHigh(pdma_phys_bpl);
595 iocb->ulpBdeCount = 1;
596 iocb->ulpLe = 1;
597 }
598 iocb->ulpClass = CLASS3;
599 psb->status = IOSTAT_SUCCESS;
600 /* Put it back into the SCSI buffer list */
601 lpfc_release_scsi_buf_s3(phba, psb);
602
603 }
604
605 return bcnt;
606 }
607
608 /**
609 * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
610 * @phba: pointer to lpfc hba data structure.
611 * @axri: pointer to the fcp xri abort wcqe structure.
612 *
613 * This routine is invoked by the worker thread to process a SLI4 fast-path
614 * FCP aborted xri.
615 **/
616 void
617 lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
618 struct sli4_wcqe_xri_aborted *axri)
619 {
620 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
621 struct lpfc_scsi_buf *psb, *next_psb;
622 unsigned long iflag = 0;
623
624 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag);
625 list_for_each_entry_safe(psb, next_psb,
626 &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
627 if (psb->cur_iocbq.sli4_xritag == xri) {
628 list_del(&psb->list);
629 psb->status = IOSTAT_SUCCESS;
630 spin_unlock_irqrestore(
631 &phba->sli4_hba.abts_scsi_buf_list_lock,
632 iflag);
633 lpfc_release_scsi_buf_s4(phba, psb);
634 return;
635 }
636 }
637 spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
638 iflag);
639 }
640
641 /**
642 * lpfc_sli4_repost_scsi_sgl_list - Repost the scsi buffers' sgl pages as a block
643 * @phba: pointer to lpfc hba data structure.
644 *
645 * This routine walks the list of scsi buffers that have been allocated and
646 * reposts them to the HBA by using SGL block post. This is needed after a
647 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
648 * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
649 * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
650 *
651 * Returns: 0 = success, non-zero failure.
652 **/
653 int
654 lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
655 {
656 struct lpfc_scsi_buf *psb;
657 int index, status, bcnt = 0, rcnt = 0, rc = 0;
658 LIST_HEAD(sblist);
659
660 for (index = 0; index < phba->sli4_hba.scsi_xri_cnt; index++) {
661 psb = phba->sli4_hba.lpfc_scsi_psb_array[index];
662 if (psb) {
663 /* Remove from SCSI buffer list */
664 list_del(&psb->list);
665 /* Add it to a local SCSI buffer list */
666 list_add_tail(&psb->list, &sblist);
667 if (++rcnt == LPFC_NEMBED_MBOX_SGL_CNT) {
668 bcnt = rcnt;
669 rcnt = 0;
670 }
671 } else
672 /* A hole present in the XRI array, need to skip */
673 bcnt = rcnt;
674
675 if (index == phba->sli4_hba.scsi_xri_cnt - 1)
676 /* End of XRI array for SCSI buffer, complete */
677 bcnt = rcnt;
678
679 /* Continue until collect up to a nembed page worth of sgls */
680 if (bcnt == 0)
681 continue;
682 /* Now, post the SCSI buffer list sgls as a block */
683 status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
684 /* Reset SCSI buffer count for next round of posting */
685 bcnt = 0;
686 while (!list_empty(&sblist)) {
687 list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
688 list);
689 if (status) {
690 /* Put this back on the abort scsi list */
691 psb->status = IOSTAT_LOCAL_REJECT;
692 psb->result = IOERR_ABORT_REQUESTED;
693 rc++;
694 } else
695 psb->status = IOSTAT_SUCCESS;
696 /* Put it back into the SCSI buffer list */
697 lpfc_release_scsi_buf_s4(phba, psb);
698 }
699 }
700 return rc;
701 }
702
703 /**
704 * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
705 * @vport: The virtual port for which this call being executed.
706 * @num_to_alloc: The requested number of buffers to allocate.
707 *
708 * This routine allocates a scsi buffer for device with SLI-4 interface spec,
709 * the scsi buffer contains all the necessary information needed to initiate
710 * a SCSI I/O.
711 *
712 * Return codes:
713 * int - number of scsi buffers that were allocated.
714 * 0 = failure, less than num_to_alloc is a partial failure.
715 **/
716 static int
717 lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
718 {
719 struct lpfc_hba *phba = vport->phba;
720 struct lpfc_scsi_buf *psb;
721 struct sli4_sge *sgl;
722 IOCB_t *iocb;
723 dma_addr_t pdma_phys_fcp_cmd;
724 dma_addr_t pdma_phys_fcp_rsp;
725 dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
726 uint16_t iotag, last_xritag = NO_XRI;
727 int status = 0, index;
728 int bcnt;
729 int non_sequential_xri = 0;
730 int rc = 0;
731 LIST_HEAD(sblist);
732
733 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
734 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
735 if (!psb)
736 break;
737
738 /*
739 * Get memory from the pci pool to map the virt space to pci bus
740 * space for an I/O. The DMA buffer includes space for the
741 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
742 * necessary to support the sg_tablesize.
743 */
744 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
745 GFP_KERNEL, &psb->dma_handle);
746 if (!psb->data) {
747 kfree(psb);
748 break;
749 }
750
751 /* Initialize virtual ptrs to dma_buf region. */
752 memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
753
754 /* Allocate iotag for psb->cur_iocbq. */
755 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
756 if (iotag == 0) {
757 kfree(psb);
758 break;
759 }
760
761 psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba);
762 if (psb->cur_iocbq.sli4_xritag == NO_XRI) {
763 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
764 psb->data, psb->dma_handle);
765 kfree(psb);
766 break;
767 }
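/*
 * SGLs are normally posted to the port in blocks, which requires
 * the XRIs in a block to be consecutive. If this buffer's XRI does
 * not follow the previous one, note it so its SGL is posted
 * individually further down instead.
 */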
768 if (last_xritag != NO_XRI
769 && psb->cur_iocbq.sli4_xritag != (last_xritag+1)) {
770 non_sequential_xri = 1;
771 } else
772 list_add_tail(&psb->list, &sblist);
773 last_xritag = psb->cur_iocbq.sli4_xritag;
774
775 index = phba->sli4_hba.scsi_xri_cnt++;
776 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
777
778 psb->fcp_bpl = psb->data;
779 psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size)
780 - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
781 psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
782 sizeof(struct fcp_cmnd));
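/*
 * Note the layout differs from SLI-3: the SGL sits at the start of
 * the DMA buffer while the FCP command and FCP response are packed
 * at its very end.
 */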
783
784 /* Initialize local short-hand pointers. */
785 sgl = (struct sli4_sge *)psb->fcp_bpl;
786 pdma_phys_bpl = psb->dma_handle;
787 pdma_phys_fcp_cmd =
788 (psb->dma_handle + phba->cfg_sg_dma_buf_size)
789 - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
790 pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
791
792 /*
793 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
794 * are sg list bdes. Initialize the first two and leave the
795 * rest for queuecommand.
796 */
797 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
798 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
799 bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_cmnd));
800 bf_set(lpfc_sli4_sge_last, sgl, 0);
801 sgl->word2 = cpu_to_le32(sgl->word2);
802 sgl->word3 = cpu_to_le32(sgl->word3);
803 sgl++;
804
805 /* Setup the physical region for the FCP RSP */
806 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
807 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
808 bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_rsp));
809 bf_set(lpfc_sli4_sge_last, sgl, 1);
810 sgl->word2 = cpu_to_le32(sgl->word2);
811 sgl->word3 = cpu_to_le32(sgl->word3);
812
813 /*
814 * Since the IOCB for the FCP I/O is built into this
815 * lpfc_scsi_buf, initialize it with all known data now.
816 */
817 iocb = &psb->cur_iocbq.iocb;
818 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
819 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
820 /* setting the BLP size to 2 * sizeof BDE may not be correct.
821 * We are setting the bpl to point to our sgl. An sgl's
822 * entries are 16 bytes, a bpl's entries are 12 bytes.
823 */
824 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
825 iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
826 iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
827 iocb->ulpBdeCount = 1;
828 iocb->ulpLe = 1;
829 iocb->ulpClass = CLASS3;
830 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
831 pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
832 else
833 pdma_phys_bpl1 = 0;
834 psb->dma_phys_bpl = pdma_phys_bpl;
835 phba->sli4_hba.lpfc_scsi_psb_array[index] = psb;
836 if (non_sequential_xri) {
837 status = lpfc_sli4_post_sgl(phba, pdma_phys_bpl,
838 pdma_phys_bpl1,
839 psb->cur_iocbq.sli4_xritag);
840 if (status) {
841 /* Put this back on the abort scsi list */
842 psb->status = IOSTAT_LOCAL_REJECT;
843 psb->result = IOERR_ABORT_REQUESTED;
844 rc++;
845 } else
846 psb->status = IOSTAT_SUCCESS;
847 /* Put it back into the SCSI buffer list */
848 lpfc_release_scsi_buf_s4(phba, psb);
849 break;
850 }
851 }
852 if (bcnt) {
853 status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
854 /* Reset SCSI buffer count for next round of posting */
855 while (!list_empty(&sblist)) {
856 list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
857 list);
858 if (status) {
859 /* Put this back on the abort scsi list */
860 psb->status = IOSTAT_LOCAL_REJECT;
861 psb->result = IOERR_ABORT_REQUESTED;
862 rc++;
863 } else
864 psb->status = IOSTAT_SUCCESS;
865 /* Put it back into the SCSI buffer list */
866 lpfc_release_scsi_buf_s4(phba, psb);
867 }
868 }
869
870 return bcnt + non_sequential_xri - rc;
871 }
872
873 /**
874 * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator
875 * @vport: The virtual port for which this call being executed.
876 * @num_to_alloc: The requested number of buffers to allocate.
877 *
878 * This routine wraps the actual SCSI buffer allocator function pointer from
879 * the lpfc_hba struct.
880 *
881 * Return codes:
882 * int - number of scsi buffers that were allocated.
883 * 0 = failure, less than num_to_alloc is a partial failure.
884 **/
885 static inline int
886 lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
887 {
888 return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
889 }
890
891 /**
892 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
893 * @phba: The HBA for which this call is being executed.
894 *
895 * This routine removes a scsi buffer from the head of the @phba lpfc_scsi_buf_list
896 * list and returns it to the caller.
897 *
898 * Return codes:
899 * NULL - Error
900 * Pointer to lpfc_scsi_buf - Success
901 **/
902 static struct lpfc_scsi_buf*
903 lpfc_get_scsi_buf(struct lpfc_hba * phba)
904 {
905 struct lpfc_scsi_buf * lpfc_cmd = NULL;
906 struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
907 unsigned long iflag = 0;
908
909 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
910 list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
911 if (lpfc_cmd) {
912 lpfc_cmd->seg_cnt = 0;
913 lpfc_cmd->nonsg_phys = 0;
914 lpfc_cmd->prot_seg_cnt = 0;
915 }
916 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
917 return lpfc_cmd;
918 }
919
920 /**
921 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
922 * @phba: The Hba for which this call is being executed.
923 * @psb: The scsi buffer which is being released.
924 *
925 * This routine releases @psb scsi buffer by adding it to tail of @phba
926 * lpfc_scsi_buf_list list.
927 **/
928 static void
929 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
930 {
931 unsigned long iflag = 0;
932
933 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
934 psb->pCmd = NULL;
935 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
936 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
937 }
938
939 /**
940 * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list
941 * @phba: The Hba for which this call is being executed.
942 * @psb: The scsi buffer which is being released.
943 *
944 * This routine releases @psb scsi buffer by adding it to tail of @phba
945 * lpfc_scsi_buf_list list. For SLI4, XRIs are tied to the scsi buffer
946 * and cannot be reused for at least RA_TOV amount of time if it was
947 * aborted.
948 **/
949 static void
950 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
951 {
952 unsigned long iflag = 0;
953
954 if (psb->status == IOSTAT_LOCAL_REJECT
955 && psb->result == IOERR_ABORT_REQUESTED) {
956 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
957 iflag);
958 psb->pCmd = NULL;
959 list_add_tail(&psb->list,
960 &phba->sli4_hba.lpfc_abts_scsi_buf_list);
961 spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
962 iflag);
963 } else {
964
965 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
966 psb->pCmd = NULL;
967 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
968 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
969 }
970 }
971
972 /**
973 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
974 * @phba: The Hba for which this call is being executed.
975 * @psb: The scsi buffer which is being released.
976 *
977 * This routine releases @psb scsi buffer by adding it to tail of @phba
978 * lpfc_scsi_buf_list list.
979 **/
980 static void
981 lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
982 {
983
984 phba->lpfc_release_scsi_buf(phba, psb);
985 }
986
987 /**
988 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
989 * @phba: The Hba for which this call is being executed.
990 * @lpfc_cmd: The scsi buffer which is going to be mapped.
991 *
992 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
993 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
994 * through sg elements and formats the bdes. This routine also initializes all
995 * IOCB fields which are dependent on scsi command request buffer.
996 *
997 * Return codes:
998 * 1 - Error
999 * 0 - Success
1000 **/
1001 static int
1002 lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1003 {
1004 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
1005 struct scatterlist *sgel = NULL;
1006 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
1007 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
1008 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1009 struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
1010 dma_addr_t physaddr;
1011 uint32_t num_bde = 0;
1012 int nseg, datadir = scsi_cmnd->sc_data_direction;
1013
1014 /*
1015 * There are three possibilities here - use scatter-gather segment, use
1016 * the single mapping, or neither. Start the lpfc command prep by
1017 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
1018 * data bde entry.
1019 */
1020 bpl += 2;
1021 if (scsi_sg_count(scsi_cmnd)) {
1022 /*
1023 * The driver stores the segment count returned from pci_map_sg
1024 * because this is a count of dma-mappings used to map the use_sg
1025 * pages. They are not guaranteed to be the same for those
1026 * architectures that implement an IOMMU.
1027 */
1028
1029 nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
1030 scsi_sg_count(scsi_cmnd), datadir);
1031 if (unlikely(!nseg))
1032 return 1;
1033
1034 lpfc_cmd->seg_cnt = nseg;
1035 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
1036 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1037 "9064 BLKGRD: %s: Too many sg segments from "
1038 "dma_map_sg. Config %d, seg_cnt %d\n",
1039 __func__, phba->cfg_sg_seg_cnt,
1040 lpfc_cmd->seg_cnt);
1041 scsi_dma_unmap(scsi_cmnd);
1042 return 1;
1043 }
1044
1045 /*
1046 * The driver established a maximum scatter-gather segment count
1047 * during probe that limits the number of sg elements in any
1048 * single scsi command. Just run through the seg_cnt and format
1049 * the bde's.
1050 * When using SLI-3 the driver will try to fit all the BDEs into
1051 * the IOCB. If it can't then the BDEs get added to a BPL as it
1052 * does for SLI-2 mode.
1053 */
1054 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
1055 physaddr = sg_dma_address(sgel);
1056 if (phba->sli_rev == 3 &&
1057 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
1058 nseg <= LPFC_EXT_DATA_BDE_COUNT) {
1059 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1060 data_bde->tus.f.bdeSize = sg_dma_len(sgel);
1061 data_bde->addrLow = putPaddrLow(physaddr);
1062 data_bde->addrHigh = putPaddrHigh(physaddr);
1063 data_bde++;
1064 } else {
1065 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1066 bpl->tus.f.bdeSize = sg_dma_len(sgel);
1067 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1068 bpl->addrLow =
1069 le32_to_cpu(putPaddrLow(physaddr));
1070 bpl->addrHigh =
1071 le32_to_cpu(putPaddrHigh(physaddr));
1072 bpl++;
1073 }
1074 }
1075 }
1076
1077 /*
1078 * Finish initializing those IOCB fields that are dependent on the
1079 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
1080 * explicitly reinitialized and for SLI-3 the extended bde count is
1081 * explicitly reinitialized since all iocb memory resources are reused.
1082 */
1083 if (phba->sli_rev == 3 &&
1084 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
1085 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
1086 /*
1087 * The extended IOCB format can only fit 3 BDE or a BPL.
1088 * This I/O has more than 3 BDE so the 1st data bde will
1089 * be a BPL that is filled in here.
1090 */
1091 physaddr = lpfc_cmd->dma_handle;
1092 data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
1093 data_bde->tus.f.bdeSize = (num_bde *
1094 sizeof(struct ulp_bde64));
1095 physaddr += (sizeof(struct fcp_cmnd) +
1096 sizeof(struct fcp_rsp) +
1097 (2 * sizeof(struct ulp_bde64)));
1098 data_bde->addrHigh = putPaddrHigh(physaddr);
1099 data_bde->addrLow = putPaddrLow(physaddr);
1100 /* ebde count includes the response bde and data bpl */
1101 iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
1102 } else {
1103 /* ebde count includes the response bde and data bdes */
1104 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
1105 }
1106 } else {
1107 iocb_cmd->un.fcpi64.bdl.bdeSize =
1108 ((num_bde + 2) * sizeof(struct ulp_bde64));
1109 }
1110 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
1111
1112 /*
1113 * Due to difference in data length between DIF/non-DIF paths,
1114 * we need to set word 4 of IOCB here
1115 */
1116 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
1117 return 0;
1118 }
1119
1120 /*
1121 * Given a scsi cmnd, determine the BlockGuard profile to be used
1122 * with the cmd
1123 */
1124 static int
1125 lpfc_sc_to_sli_prof(struct lpfc_hba *phba, struct scsi_cmnd *sc)
1126 {
1127 uint8_t guard_type = scsi_host_get_guard(sc->device->host);
1128 uint8_t ret_prof = LPFC_PROF_INVALID;
1129
1130 if (guard_type == SHOST_DIX_GUARD_IP) {
1131 switch (scsi_get_prot_op(sc)) {
1132 case SCSI_PROT_READ_INSERT:
1133 case SCSI_PROT_WRITE_STRIP:
1134 ret_prof = LPFC_PROF_AST2;
1135 break;
1136
1137 case SCSI_PROT_READ_STRIP:
1138 case SCSI_PROT_WRITE_INSERT:
1139 ret_prof = LPFC_PROF_A1;
1140 break;
1141
1142 case SCSI_PROT_READ_PASS:
1143 case SCSI_PROT_WRITE_PASS:
1144 ret_prof = LPFC_PROF_AST1;
1145 break;
1146
1147 case SCSI_PROT_NORMAL:
1148 default:
1149 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1150 "9063 BLKGRD:Bad op/guard:%d/%d combination\n",
1151 scsi_get_prot_op(sc), guard_type);
1152 break;
1153
1154 }
1155 } else if (guard_type == SHOST_DIX_GUARD_CRC) {
1156 switch (scsi_get_prot_op(sc)) {
1157 case SCSI_PROT_READ_STRIP:
1158 case SCSI_PROT_WRITE_INSERT:
1159 ret_prof = LPFC_PROF_A1;
1160 break;
1161
1162 case SCSI_PROT_READ_PASS:
1163 case SCSI_PROT_WRITE_PASS:
1164 ret_prof = LPFC_PROF_C1;
1165 break;
1166
1167 case SCSI_PROT_READ_INSERT:
1168 case SCSI_PROT_WRITE_STRIP:
1169 case SCSI_PROT_NORMAL:
1170 default:
1171 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1172 "9075 BLKGRD: Bad op/guard:%d/%d combination\n",
1173 scsi_get_prot_op(sc), guard_type);
1174 break;
1175 }
1176 } else {
1177 /* unsupported format */
1178 BUG();
1179 }
1180
1181 return ret_prof;
1182 }
1183
1184 struct scsi_dif_tuple {
1185 __be16 guard_tag; /* Checksum */
1186 __be16 app_tag; /* Opaque storage */
1187 __be32 ref_tag; /* Target LBA or indirect LBA */
1188 };
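/*
 * Layout of the 8-byte T10 DIF tuple appended to each logical block: a
 * 2-byte CRC guard, a 2-byte application tag and a 4-byte reference tag
 * (for Type 1 protection, the lower 32 bits of the LBA).
 */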
1189
1190 static inline unsigned
1191 lpfc_cmd_blksize(struct scsi_cmnd *sc)
1192 {
1193 return sc->device->sector_size;
1194 }
1195
1196 /**
1197 * lpfc_get_cmd_dif_parms - Extract DIF parameters from SCSI command
1198 * @sc: in: SCSI command
1199 * @apptagmask: out: app tag mask
1200 * @apptagval: out: app tag value
1201 * @reftag: out: ref tag (reference tag)
1202 *
1203 * Description:
1204 * Extract DIF parameters from the command if possible. Otherwise,
1205 * use default parameters.
1206 *
1207 **/
1208 static inline void
1209 lpfc_get_cmd_dif_parms(struct scsi_cmnd *sc, uint16_t *apptagmask,
1210 uint16_t *apptagval, uint32_t *reftag)
1211 {
1212 struct scsi_dif_tuple *spt;
1213 unsigned char op = scsi_get_prot_op(sc);
1214 unsigned int protcnt = scsi_prot_sg_count(sc);
1215 static int cnt;
1216
1217 if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
1218 op == SCSI_PROT_WRITE_PASS)) {
1219
1220 cnt++;
1221 spt = page_address(sg_page(scsi_prot_sglist(sc))) +
1222 scsi_prot_sglist(sc)[0].offset;
1223 *apptagmask = 0;
1224 *apptagval = 0;
1225 *reftag = cpu_to_be32(spt->ref_tag);
1226
1227 } else {
1228 /* SBC defines ref tag to be lower 32bits of LBA */
1229 *reftag = (uint32_t) (0xffffffff & scsi_get_lba(sc));
1230 *apptagmask = 0;
1231 *apptagval = 0;
1232 }
1233 }
1234
1235 /*
1236 * This function sets up buffer list for protection groups of
1237 * type LPFC_PG_TYPE_NO_DIF
1238 *
1239 * This is usually used when the HBA is instructed to generate
1240 * DIFs and insert them into data stream (or strip DIF from
1241 * incoming data stream)
1242 *
1243 * The buffer list consists of just one protection group described
1244 * below:
1245 * +-------------------------+
1246 * start of prot group --> | PDE_1 |
1247 * +-------------------------+
1248 * | Data BDE |
1249 * +-------------------------+
1250 * |more Data BDE's ... (opt)|
1251 * +-------------------------+
1252 *
1253 * @sc: pointer to scsi command we're working on
1254 * @bpl: pointer to buffer list for protection groups
1255 * @datacnt: number of segments of data that have been dma mapped
1256 *
1257 * Note: Data s/g buffers have been dma mapped
1258 */
1259 static int
1260 lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1261 struct ulp_bde64 *bpl, int datasegcnt)
1262 {
1263 struct scatterlist *sgde = NULL; /* s/g data entry */
1264 struct lpfc_pde *pde1 = NULL;
1265 dma_addr_t physaddr;
1266 int i = 0, num_bde = 0;
1267 int datadir = sc->sc_data_direction;
1268 int prof = LPFC_PROF_INVALID;
1269 unsigned blksize;
1270 uint32_t reftag;
1271 uint16_t apptagmask, apptagval;
1272
1273 pde1 = (struct lpfc_pde *) bpl;
1274 prof = lpfc_sc_to_sli_prof(phba, sc);
1275
1276 if (prof == LPFC_PROF_INVALID)
1277 goto out;
1278
1279 /* extract some info from the scsi command for PDE1*/
1280 blksize = lpfc_cmd_blksize(sc);
1281 lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
1282
1283 /* setup PDE1 with what we have */
1284 lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize,
1285 BG_EC_STOP_ERR);
1286 lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag);
1287
1288 num_bde++;
1289 bpl++;
1290
1291 /* assumption: caller has already run dma_map_sg on command data */
1292 scsi_for_each_sg(sc, sgde, datasegcnt, i) {
1293 physaddr = sg_dma_address(sgde);
1294 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
1295 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
1296 bpl->tus.f.bdeSize = sg_dma_len(sgde);
1297 if (datadir == DMA_TO_DEVICE)
1298 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1299 else
1300 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1301 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1302 bpl++;
1303 num_bde++;
1304 }
1305
1306 out:
1307 return num_bde;
1308 }
1309
1310 /*
1311 * This function sets up buffer list for protection groups of
1312 * type LPFC_PG_TYPE_DIF_BUF
1313 *
1314 * This is usually used when DIFs are in their own buffers,
1315 * separate from the data. The HBA can then be instructed
1316 * to place the DIFs in the outgoing stream. For read operations,
1317 * the HBA could extract the DIFs and place them in DIF buffers.
1318 *
1319 * The buffer list for this type consists of one or more of the
1320 * protection groups described below:
1321 * +-------------------------+
1322 * start of first prot group --> | PDE_1 |
1323 * +-------------------------+
1324 * | PDE_3 (Prot BDE) |
1325 * +-------------------------+
1326 * | Data BDE |
1327 * +-------------------------+
1328 * |more Data BDE's ... (opt)|
1329 * +-------------------------+
1330 * start of new prot group --> | PDE_1 |
1331 * +-------------------------+
1332 * | ... |
1333 * +-------------------------+
1334 *
1335 * @sc: pointer to scsi command we're working on
1336 * @bpl: pointer to buffer list for protection groups
1337 * @datacnt: number of segments of data that have been dma mapped
1338 * @protcnt: number of segment of protection data that have been dma mapped
1339 *
1340 * Note: It is assumed that both data and protection s/g buffers have been
1341 * mapped for DMA
1342 */
1343 static int
1344 lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1345 struct ulp_bde64 *bpl, int datacnt, int protcnt)
1346 {
1347 struct scatterlist *sgde = NULL; /* s/g data entry */
1348 struct scatterlist *sgpe = NULL; /* s/g prot entry */
1349 struct lpfc_pde *pde1 = NULL;
1350 struct ulp_bde64 *prot_bde = NULL;
1351 dma_addr_t dataphysaddr, protphysaddr;
1352 unsigned short curr_data = 0, curr_prot = 0;
1353 unsigned int split_offset, protgroup_len;
1354 unsigned int protgrp_blks, protgrp_bytes;
1355 unsigned int remainder, subtotal;
1356 int prof = LPFC_PROF_INVALID;
1357 int datadir = sc->sc_data_direction;
1358 unsigned char pgdone = 0, alldone = 0;
1359 unsigned blksize;
1360 uint32_t reftag;
1361 uint16_t apptagmask, apptagval;
1362 int num_bde = 0;
1363
1364 sgpe = scsi_prot_sglist(sc);
1365 sgde = scsi_sglist(sc);
1366
1367 if (!sgpe || !sgde) {
1368 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1369 "9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
1370 sgpe, sgde);
1371 return 0;
1372 }
1373
1374 prof = lpfc_sc_to_sli_prof(phba, sc);
1375 if (prof == LPFC_PROF_INVALID)
1376 goto out;
1377
1378 /* extract some info from the scsi command for PDE1*/
1379 blksize = lpfc_cmd_blksize(sc);
1380 lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
1381
1382 split_offset = 0;
1383 do {
1384 /* setup the first PDE_1 */
1385 pde1 = (struct lpfc_pde *) bpl;
1386
1387 lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize,
1388 BG_EC_STOP_ERR);
1389 lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag);
1390
1391 num_bde++;
1392 bpl++;
1393
1394 /* setup the first BDE that points to protection buffer */
1395 prot_bde = (struct ulp_bde64 *) bpl;
1396 protphysaddr = sg_dma_address(sgpe);
1397 prot_bde->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
1398 prot_bde->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
1399 protgroup_len = sg_dma_len(sgpe);
1400
1401
1402 /* must be integer multiple of the DIF block length */
1403 BUG_ON(protgroup_len % 8);
1404
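/*
 * Each 8-byte DIF tuple covers one logical block, so this
 * protection buffer describes protgroup_len / 8 blocks and
 * therefore protgrp_blks * blksize bytes of user data.
 */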
1405 protgrp_blks = protgroup_len / 8;
1406 protgrp_bytes = protgrp_blks * blksize;
1407
1408 prot_bde->tus.f.bdeSize = protgroup_len;
1409 if (datadir == DMA_TO_DEVICE)
1410 prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1411 else
1412 prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1413 prot_bde->tus.w = le32_to_cpu(bpl->tus.w);
1414
1415 curr_prot++;
1416 num_bde++;
1417
1418 /* setup BDE's for data blocks associated with DIF data */
1419 pgdone = 0;
1420 subtotal = 0; /* total bytes processed for current prot grp */
1421 while (!pgdone) {
1422 if (!sgde) {
1423 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1424 "9065 BLKGRD:%s Invalid data segment\n",
1425 __func__);
1426 return 0;
1427 }
1428 bpl++;
1429 dataphysaddr = sg_dma_address(sgde) + split_offset;
1430 bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
1431 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
1432
1433 remainder = sg_dma_len(sgde) - split_offset;
1434
1435 if ((subtotal + remainder) <= protgrp_bytes) {
1436 /* we can use this whole buffer */
1437 bpl->tus.f.bdeSize = remainder;
1438 split_offset = 0;
1439
1440 if ((subtotal + remainder) == protgrp_bytes)
1441 pgdone = 1;
1442 } else {
1443 /* must split this buffer with next prot grp */
1444 bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
1445 split_offset += bpl->tus.f.bdeSize;
1446 }
1447
1448 subtotal += bpl->tus.f.bdeSize;
1449
1450 if (datadir == DMA_TO_DEVICE)
1451 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1452 else
1453 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1454 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1455
1456 num_bde++;
1457 curr_data++;
1458
1459 if (split_offset)
1460 break;
1461
1462 /* Move to the next s/g segment if possible */
1463 sgde = sg_next(sgde);
1464 }
1465
1466 /* are we done ? */
1467 if (curr_prot == protcnt) {
1468 alldone = 1;
1469 } else if (curr_prot < protcnt) {
1470 /* advance to next prot buffer */
1471 sgpe = sg_next(sgpe);
1472 bpl++;
1473
1474 /* update the reference tag */
1475 reftag += protgrp_blks;
1476 } else {
1477 /* if we're here, we have a bug */
1478 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1479 "9054 BLKGRD: bug in %s\n", __func__);
1480 }
1481
1482 } while (!alldone);
1483
1484 out:
1485
1486
1487 return num_bde;
1488 }
1489 /*
1490 * Given a SCSI command that supports DIF, determine composition of protection
1491 * groups involved in setting up buffer lists
1492 *
1493 * Returns:
1494 * the LPFC_PG_TYPE_* protection group type for DIF (for both read and write)
1495 */
1496 static int
1497 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
1498 {
1499 int ret = LPFC_PG_TYPE_INVALID;
1500 unsigned char op = scsi_get_prot_op(sc);
1501
1502 switch (op) {
1503 case SCSI_PROT_READ_STRIP:
1504 case SCSI_PROT_WRITE_INSERT:
1505 ret = LPFC_PG_TYPE_NO_DIF;
1506 break;
1507 case SCSI_PROT_READ_INSERT:
1508 case SCSI_PROT_WRITE_STRIP:
1509 case SCSI_PROT_READ_PASS:
1510 case SCSI_PROT_WRITE_PASS:
1511 ret = LPFC_PG_TYPE_DIF_BUF;
1512 break;
1513 default:
1514 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1515 "9021 Unsupported protection op:%d\n", op);
1516 break;
1517 }
1518
1519 return ret;
1520 }
1521
1522 /*
1523 * This is the protection/DIF aware version of
1524 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
1525 * two functions eventually, but for now, it's here
1526 */
1527 static int
1528 lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba,
1529 struct lpfc_scsi_buf *lpfc_cmd)
1530 {
1531 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
1532 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
1533 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
1534 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1535 uint32_t num_bde = 0;
1536 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
1537 int prot_group_type = 0;
1538 int diflen, fcpdl;
1539 unsigned blksize;
1540
1541 /*
1542 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
1543 * fcp_rsp regions to the first data bde entry
1544 */
1545 bpl += 2;
1546 if (scsi_sg_count(scsi_cmnd)) {
1547 /*
1548 * The driver stores the segment count returned from pci_map_sg
1549 * because this is a count of dma-mappings used to map the use_sg
1550 * pages. They are not guaranteed to be the same for those
1551 * architectures that implement an IOMMU.
1552 */
1553 datasegcnt = dma_map_sg(&phba->pcidev->dev,
1554 scsi_sglist(scsi_cmnd),
1555 scsi_sg_count(scsi_cmnd), datadir);
1556 if (unlikely(!datasegcnt))
1557 return 1;
1558
1559 lpfc_cmd->seg_cnt = datasegcnt;
1560 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
1561 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1562 "9067 BLKGRD: %s: Too many sg segments"
1563 " from dma_map_sg. Config %d, seg_cnt"
1564 " %d\n",
1565 __func__, phba->cfg_sg_seg_cnt,
1566 lpfc_cmd->seg_cnt);
1567 scsi_dma_unmap(scsi_cmnd);
1568 return 1;
1569 }
1570
1571 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
1572
1573 switch (prot_group_type) {
1574 case LPFC_PG_TYPE_NO_DIF:
1575 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
1576 datasegcnt);
1577 /* we should have 2 or more entries in buffer list */
1578 if (num_bde < 2)
1579 goto err;
1580 break;
1581 case LPFC_PG_TYPE_DIF_BUF:{
1582 /*
1583 * This type indicates that protection buffers are
1584 * passed to the driver, so that needs to be prepared
1585 * for DMA
1586 */
1587 protsegcnt = dma_map_sg(&phba->pcidev->dev,
1588 scsi_prot_sglist(scsi_cmnd),
1589 scsi_prot_sg_count(scsi_cmnd), datadir);
1590 if (unlikely(!protsegcnt)) {
1591 scsi_dma_unmap(scsi_cmnd);
1592 return 1;
1593 }
1594
1595 lpfc_cmd->prot_seg_cnt = protsegcnt;
1596 if (lpfc_cmd->prot_seg_cnt
1597 > phba->cfg_prot_sg_seg_cnt) {
1598 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1599 "9068 BLKGRD: %s: Too many prot sg "
1600 "segments from dma_map_sg. Config %d,"
1601 "prot_seg_cnt %d\n", __func__,
1602 phba->cfg_prot_sg_seg_cnt,
1603 lpfc_cmd->prot_seg_cnt);
1604 dma_unmap_sg(&phba->pcidev->dev,
1605 scsi_prot_sglist(scsi_cmnd),
1606 scsi_prot_sg_count(scsi_cmnd),
1607 datadir);
1608 scsi_dma_unmap(scsi_cmnd);
1609 return 1;
1610 }
1611
1612 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
1613 datasegcnt, protsegcnt);
1614 /* we should have 3 or more entries in buffer list */
1615 if (num_bde < 3)
1616 goto err;
1617 break;
1618 }
1619 case LPFC_PG_TYPE_INVALID:
1620 default:
1621 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1622 "9022 Unexpected protection group %i\n",
1623 prot_group_type);
1624 return 1;
1625 }
1626 }
1627
1628 /*
1629 * Finish initializing those IOCB fields that are dependent on the
1630 * scsi_cmnd request_buffer. Note that the bdeSize is explicitly
1631 * reinitialized since all iocb memory resources are used many times
1632 * for transmit, receive, and continuation bpl's.
1633 */
1634 iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
1635 iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
1636 iocb_cmd->ulpBdeCount = 1;
1637 iocb_cmd->ulpLe = 1;
1638
1639 fcpdl = scsi_bufflen(scsi_cmnd);
1640
1641 if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) {
1642 /*
1643 * We are in DIF Type 1 mode
1644 * Every data block has an 8 byte DIF (trailer)
1645 * attached to it. Must adjust the FCP data length
1646 */
1647 blksize = lpfc_cmd_blksize(scsi_cmnd);
1648 diflen = (fcpdl / blksize) * 8;
1649 fcpdl += diflen;
1650 }
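/*
 * For example, in Type 1 mode a hypothetical 4096-byte transfer on a
 * 512-byte block device carries 8 DIF tuples, so 64 bytes are added
 * and fcpdl becomes 4160.
 */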
1651 fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);
1652
1653 /*
1654 * Due to difference in data length between DIF/non-DIF paths,
1655 * we need to set word 4 of IOCB here
1656 */
1657 iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
1658
1659 return 0;
1660 err:
1661 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1662 "9023 Could not setup all needed BDE's"
1663 "prot_group_type=%d, num_bde=%d\n",
1664 prot_group_type, num_bde);
1665 return 1;
1666 }
1667
1668 /*
1669 * This function checks for BlockGuard errors detected by
1670 * the HBA. In case of errors, the ASC/ASCQ fields in the
1671 * sense buffer will be set accordingly, paired with
1672 * ILLEGAL_REQUEST to signal to the kernel that the HBA
1673 * detected corruption.
1674 *
1675 * Returns:
1676 * 0 - No error found
1677 * 1 - BlockGuard error found
1678 * -1 - Internal error (bad profile, ...etc)
1679 */
1680 static int
1681 lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1682 struct lpfc_iocbq *pIocbOut)
1683 {
1684 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
1685 struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
1686 int ret = 0;
1687 uint32_t bghm = bgf->bghm;
1688 uint32_t bgstat = bgf->bgstat;
1689 uint64_t failing_sector = 0;
1690
1691 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9069 BLKGRD: BG ERROR in cmd"
1692 " 0x%x lba 0x%llx blk cnt 0x%x "
1693 "bgstat=0x%x bghm=0x%x\n",
1694 cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
1695 blk_rq_sectors(cmd->request), bgstat, bghm);
1696
1697 spin_lock(&_dump_buf_lock);
1698 if (!_dump_buf_done) {
1699 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving"
1700 " Data for %u blocks to debugfs\n",
1701 (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
1702 lpfc_debug_save_data(phba, cmd);
1703
1704 /* If we have a prot sgl, save the DIF buffer */
1705 if (lpfc_prot_group_type(phba, cmd) ==
1706 LPFC_PG_TYPE_DIF_BUF) {
1707 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
1708 "Saving DIF for %u blocks to debugfs\n",
1709 (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
1710 lpfc_debug_save_dif(phba, cmd);
1711 }
1712
1713 _dump_buf_done = 1;
1714 }
1715 spin_unlock(&_dump_buf_lock);
1716
1717 if (lpfc_bgs_get_invalid_prof(bgstat)) {
1718 cmd->result = ScsiResult(DID_ERROR, 0);
1719 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9072 BLKGRD: Invalid"
1720 " BlockGuard profile. bgstat:0x%x\n",
1721 bgstat);
1722 ret = (-1);
1723 goto out;
1724 }
1725
1726 if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
1727 cmd->result = ScsiResult(DID_ERROR, 0);
1728 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9073 BLKGRD: "
1729 "Invalid BlockGuard DIF Block. bgstat:0x%x\n",
1730 bgstat);
1731 ret = (-1);
1732 goto out;
1733 }
1734
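/*
 * For each BlockGuard error class, build a CHECK CONDITION with
 * ILLEGAL REQUEST sense data using ASC 0x10; ASCQ 0x1, 0x2 and 0x3
 * identify guard tag, application tag and reference tag check
 * failures respectively.
 */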
1735 if (lpfc_bgs_get_guard_err(bgstat)) {
1736 ret = 1;
1737
1738 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1739 0x10, 0x1);
1740 cmd->result = DRIVER_SENSE << 24
1741 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1742 phba->bg_guard_err_cnt++;
1743 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1744 "9055 BLKGRD: guard_tag error\n");
1745 }
1746
1747 if (lpfc_bgs_get_reftag_err(bgstat)) {
1748 ret = 1;
1749
1750 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1751 0x10, 0x3);
1752 cmd->result = DRIVER_SENSE << 24
1753 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1754
1755 phba->bg_reftag_err_cnt++;
1756 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1757 "9056 BLKGRD: ref_tag error\n");
1758 }
1759
1760 if (lpfc_bgs_get_apptag_err(bgstat)) {
1761 ret = 1;
1762
1763 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1764 0x10, 0x2);
1765 cmd->result = DRIVER_SENSE << 24
1766 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1767
1768 phba->bg_apptag_err_cnt++;
1769 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1770 "9061 BLKGRD: app_tag error\n");
1771 }
1772
1773 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
1774 /*
1775 * setup sense data descriptor 0 per SPC-4 as an information
1776 * field, and put the failing LBA in it
1777 */
1778 cmd->sense_buffer[8] = 0; /* Information */
1779 cmd->sense_buffer[9] = 0xa; /* Add. length */
1780 bghm /= cmd->device->sector_size;
1781
1782 failing_sector = scsi_get_lba(cmd);
1783 failing_sector += bghm;
1784
1785 put_unaligned_be64(failing_sector, &cmd->sense_buffer[10]);
1786 }
1787
1788 if (!ret) {
1789 /* No error was reported - problem in FW? */
1790 cmd->result = ScsiResult(DID_ERROR, 0);
1791 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1792 "9057 BLKGRD: no errors reported!\n");
1793 }
1794
1795 out:
1796 return ret;
1797 }
1798
1799 /**
1800 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
1801 * @phba: The Hba for which this call is being executed.
1802 * @lpfc_cmd: The scsi buffer which is going to be mapped.
1803 *
1804 * This routine does the pci dma mapping of the scatter-gather list in the scsi
1805 * cmnd field of @lpfc_cmd for a device with the SLI-4 interface spec.
1806 *
1807 * Return codes:
1808 * 1 - Error
1809 * 0 - Success
1810 **/
1811 static int
1812 lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1813 {
1814 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
1815 struct scatterlist *sgel = NULL;
1816 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
1817 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
1818 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1819 dma_addr_t physaddr;
1820 uint32_t num_bde = 0;
1821 uint32_t dma_len;
1822 uint32_t dma_offset = 0;
1823 int nseg;
1824
1825 /*
1826 * There are three possibilities here - use scatter-gather segment, use
1827 * the single mapping, or neither. Start the lpfc command prep by
1828 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
1829 * data bde entry.
1830 */
1831 if (scsi_sg_count(scsi_cmnd)) {
1832 /*
1833 * The driver stores the segment count returned from pci_map_sg
1834 * because this is a count of dma-mappings used to map the use_sg
1835 * pages. They are not guaranteed to be the same for those
1836 * architectures that implement an IOMMU.
1837 */
1838
1839 nseg = scsi_dma_map(scsi_cmnd);
1840 if (unlikely(!nseg))
1841 return 1;
1842 sgl += 1;
1843 /* clear the last flag in the fcp_rsp map entry */
1844 sgl->word2 = le32_to_cpu(sgl->word2);
1845 bf_set(lpfc_sli4_sge_last, sgl, 0);
1846 sgl->word2 = cpu_to_le32(sgl->word2);
1847 sgl += 1;
1848
1849 lpfc_cmd->seg_cnt = nseg;
1850 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
1851 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
1852 " %s: Too many sg segments from "
1853 "dma_map_sg. Config %d, seg_cnt %d\n",
1854 __func__, phba->cfg_sg_seg_cnt,
1855 lpfc_cmd->seg_cnt);
1856 scsi_dma_unmap(scsi_cmnd);
1857 return 1;
1858 }
1859
1860 /*
1861 * The driver established a maximum scatter-gather segment count
1862 * during probe that limits the number of sg elements in any
1863 * single scsi command. Just run through the seg_cnt and format
1864 * the sge's.
1865 * When using SLI-3 the driver will try to fit all the BDEs into
1866 * the IOCB. If it can't then the BDEs get added to a BPL as it
1867 * does for SLI-2 mode.
1868 */
1869 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
1870 physaddr = sg_dma_address(sgel);
1871 dma_len = sg_dma_len(sgel);
1872 bf_set(lpfc_sli4_sge_len, sgl, sg_dma_len(sgel));
1873 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
1874 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
1875 if ((num_bde + 1) == nseg)
1876 bf_set(lpfc_sli4_sge_last, sgl, 1);
1877 else
1878 bf_set(lpfc_sli4_sge_last, sgl, 0);
1879 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
1880 sgl->word2 = cpu_to_le32(sgl->word2);
1881 sgl->word3 = cpu_to_le32(sgl->word3);
1882 dma_offset += dma_len;
1883 sgl++;
1884 }
1885 } else {
1886 sgl += 1;
1887 /* clear the last flag in the fcp_rsp map entry */
1888 sgl->word2 = le32_to_cpu(sgl->word2);
1889 bf_set(lpfc_sli4_sge_last, sgl, 1);
1890 sgl->word2 = cpu_to_le32(sgl->word2);
1891 }
1892
1893 /*
1894 * Finish initializing those IOCB fields that are dependent on the
1895 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
1896 * explicitly reinitialized since all iocb memory
1897 * resources are reused.
1898 */
1899 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
1900
1901 /*
1902 * Due to difference in data length between DIF/non-DIF paths,
1903 * we need to set word 4 of IOCB here
1904 */
1905 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
1906 return 0;
1907 }
1908
1909 /**
1910 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
1911 * @phba: The Hba for which this call is being executed.
1912 * @lpfc_cmd: The scsi buffer which is going to be mapped.
1913 *
1914 * This routine wraps the actual DMA mapping function pointer from the
1915 * lpfc_hba struct.
1916 *
1917 * Return codes:
1918 * 1 - Error
1919 * 0 - Success
1920 **/
1921 static inline int
1922 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1923 {
1924 return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
1925 }
1926
1927 /**
1928 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
1929 * @phba: Pointer to hba context object.
1930 * @vport: Pointer to vport object.
1931 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
1932 * @rsp_iocb: Pointer to response iocb object which reported error.
1933 *
1934 * This function posts an event when there is a SCSI command reporting
1935 * error from the scsi device.
1936 **/
1937 static void
1938 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
1939 struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
1940 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
1941 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
1942 uint32_t resp_info = fcprsp->rspStatus2;
1943 uint32_t scsi_status = fcprsp->rspStatus3;
1944 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
1945 struct lpfc_fast_path_event *fast_path_evt = NULL;
1946 struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
1947 unsigned long flags;
1948
1949 /* If there is queuefull or busy condition send a scsi event */
1950 if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
1951 (cmnd->result == SAM_STAT_BUSY)) {
1952 fast_path_evt = lpfc_alloc_fast_evt(phba);
1953 if (!fast_path_evt)
1954 return;
1955 fast_path_evt->un.scsi_evt.event_type =
1956 FC_REG_SCSI_EVENT;
1957 fast_path_evt->un.scsi_evt.subcategory =
1958 (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
1959 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
1960 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
1961 memcpy(&fast_path_evt->un.scsi_evt.wwpn,
1962 &pnode->nlp_portname, sizeof(struct lpfc_name));
1963 memcpy(&fast_path_evt->un.scsi_evt.wwnn,
1964 &pnode->nlp_nodename, sizeof(struct lpfc_name));
1965 } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
1966 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
1967 fast_path_evt = lpfc_alloc_fast_evt(phba);
1968 if (!fast_path_evt)
1969 return;
1970 fast_path_evt->un.check_cond_evt.scsi_event.event_type =
1971 FC_REG_SCSI_EVENT;
1972 fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
1973 LPFC_EVENT_CHECK_COND;
1974 fast_path_evt->un.check_cond_evt.scsi_event.lun =
1975 cmnd->device->lun;
1976 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
1977 &pnode->nlp_portname, sizeof(struct lpfc_name));
1978 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
1979 &pnode->nlp_nodename, sizeof(struct lpfc_name));
1980 fast_path_evt->un.check_cond_evt.sense_key =
1981 cmnd->sense_buffer[2] & 0xf;
1982 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
1983 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
1984 } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
1985 fcpi_parm &&
1986 ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
1987 ((scsi_status == SAM_STAT_GOOD) &&
1988 !(resp_info & (RESID_UNDER | RESID_OVER))))) {
1989 /*
1990 * If the status is good or the resid does not match fcpi_parm, and
1991 * fcpi_parm is valid, then there is a read_check error
1992 */
1993 fast_path_evt = lpfc_alloc_fast_evt(phba);
1994 if (!fast_path_evt)
1995 return;
1996 fast_path_evt->un.read_check_error.header.event_type =
1997 FC_REG_FABRIC_EVENT;
1998 fast_path_evt->un.read_check_error.header.subcategory =
1999 LPFC_EVENT_FCPRDCHKERR;
2000 memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
2001 &pnode->nlp_portname, sizeof(struct lpfc_name));
2002 memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
2003 &pnode->nlp_nodename, sizeof(struct lpfc_name));
2004 fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
2005 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
2006 fast_path_evt->un.read_check_error.fcpiparam =
2007 fcpi_parm;
2008 } else
2009 return;
2010
2011 fast_path_evt->vport = vport;
2012 spin_lock_irqsave(&phba->hbalock, flags);
2013 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
2014 spin_unlock_irqrestore(&phba->hbalock, flags);
2015 lpfc_worker_wake_up(phba);
2016 return;
2017 }
2018
2019 /**
2020 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
2021 * @phba: The HBA for which this call is being executed.
2022 * @psb: The scsi buffer which is going to be un-mapped.
2023 *
2024 * This routine does DMA un-mapping of the scatter gather list in the scsi
2025 * command field of @psb.
2026 **/
2027 static void
2028 lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
2029 {
2030 /*
2031 * There are only two special cases to consider. (1) the scsi command
2032 * requested scatter-gather usage or (2) the scsi command allocated
2033 * a request buffer, but did not request use_sg. There is a third
2034 * case, but it does not require resource deallocation.
2035 */
2036 if (psb->seg_cnt > 0)
2037 scsi_dma_unmap(psb->pCmd);
2038 if (psb->prot_seg_cnt > 0)
2039 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
2040 scsi_prot_sg_count(psb->pCmd),
2041 psb->pCmd->sc_data_direction);
2042 }
2043
2044 /**
2045 * lpfc_handle_fcp_err - FCP response handler
2046 * @vport: The virtual port for which this call is being executed.
2047 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2048 * @rsp_iocb: The response IOCB which contains FCP error.
2049 *
2050 * This routine is called to process response IOCB with status field
2051 * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
2052 * based upon SCSI and FCP error.
2053 **/
2054 static void
2055 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2056 struct lpfc_iocbq *rsp_iocb)
2057 {
2058 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
2059 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
2060 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
2061 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
2062 uint32_t resp_info = fcprsp->rspStatus2;
2063 uint32_t scsi_status = fcprsp->rspStatus3;
2064 uint32_t *lp;
2065 uint32_t host_status = DID_OK;
2066 uint32_t rsplen = 0;
2067 uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
2068
2069
2070 /*
2071 * If this is a task management command, there is no
2072 * scsi packet associated with this lpfc_cmd. The driver
2073 * consumes it.
2074 */
2075 if (fcpcmd->fcpCntl2) {
2076 scsi_status = 0;
2077 goto out;
2078 }
2079
2080 if (resp_info & RSP_LEN_VALID) {
2081 rsplen = be32_to_cpu(fcprsp->rspRspLen);
2082 if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
2083 (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
2084 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2085 "2719 Invalid response length: "
2086 "tgt x%x lun x%x cmnd x%x rsplen x%x\n",
2087 cmnd->device->id,
2088 cmnd->device->lun, cmnd->cmnd[0],
2089 rsplen);
2090 host_status = DID_ERROR;
2091 goto out;
2092 }
2093 }
2094
2095 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
2096 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
2097 if (snslen > SCSI_SENSE_BUFFERSIZE)
2098 snslen = SCSI_SENSE_BUFFERSIZE;
2099
2100 if (resp_info & RSP_LEN_VALID)
2101 rsplen = be32_to_cpu(fcprsp->rspRspLen);
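/*
 * Sense data immediately follows any response info bytes in the
 * FCP_RSP payload, so skip rsplen bytes past rspInfo0.
 */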
2102 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
2103 }
2104 lp = (uint32_t *)cmnd->sense_buffer;
2105
2106 if (!scsi_status && (resp_info & RESID_UNDER))
2107 logit = LOG_FCP;
2108
2109 lpfc_printf_vlog(vport, KERN_WARNING, logit,
2110 "9024 FCP command x%x failed: x%x SNS x%x x%x "
2111 "Data: x%x x%x x%x x%x x%x\n",
2112 cmnd->cmnd[0], scsi_status,
2113 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
2114 be32_to_cpu(fcprsp->rspResId),
2115 be32_to_cpu(fcprsp->rspSnsLen),
2116 be32_to_cpu(fcprsp->rspRspLen),
2117 fcprsp->rspInfo3);
2118
2119 scsi_set_resid(cmnd, 0);
2120 if (resp_info & RESID_UNDER) {
2121 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
2122
2123 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2124 "9025 FCP Read Underrun, expected %d, "
2125 "residual %d Data: x%x x%x x%x\n",
2126 be32_to_cpu(fcpcmd->fcpDl),
2127 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
2128 cmnd->underflow);
2129
2130 /*
2131 * If there is an underrun, check whether the underrun reported by
2132 * the storage array matches the underrun reported by the HBA.
2133 * If they differ, a frame was dropped.
2134 */
2135 if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
2136 fcpi_parm &&
2137 (scsi_get_resid(cmnd) != fcpi_parm)) {
2138 lpfc_printf_vlog(vport, KERN_WARNING,
2139 LOG_FCP | LOG_FCP_ERROR,
2140 "9026 FCP Read Check Error "
2141 "and Underrun Data: x%x x%x x%x x%x\n",
2142 be32_to_cpu(fcpcmd->fcpDl),
2143 scsi_get_resid(cmnd), fcpi_parm,
2144 cmnd->cmnd[0]);
2145 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
2146 host_status = DID_ERROR;
2147 }
2148 /*
2149 * The cmnd->underflow is the minimum number of bytes that must
2150 * be transferred for this command. Provided a sense condition
2151 * is not present, make sure the actual amount transferred is at
2152 * least the underflow value or fail.
2153 */
2154 if (!(resp_info & SNS_LEN_VALID) &&
2155 (scsi_status == SAM_STAT_GOOD) &&
2156 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
2157 < cmnd->underflow)) {
2158 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2159 "9027 FCP command x%x residual "
2160 "underrun converted to error "
2161 "Data: x%x x%x x%x\n",
2162 cmnd->cmnd[0], scsi_bufflen(cmnd),
2163 scsi_get_resid(cmnd), cmnd->underflow);
2164 host_status = DID_ERROR;
2165 }
2166 } else if (resp_info & RESID_OVER) {
2167 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2168 "9028 FCP command x%x residual overrun error. "
2169 "Data: x%x x%x\n", cmnd->cmnd[0],
2170 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
2171 host_status = DID_ERROR;
2172
2173 /*
2174 * Check SLI validation that all the transfer was actually done
2175 * (fcpi_parm should be zero). Apply check only to reads.
2176 */
2177 } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
2178 (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
2179 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
2180 "9029 FCP Read Check Error Data: "
2181 "x%x x%x x%x x%x\n",
2182 be32_to_cpu(fcpcmd->fcpDl),
2183 be32_to_cpu(fcprsp->rspResId),
2184 fcpi_parm, cmnd->cmnd[0]);
2185 host_status = DID_ERROR;
2186 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
2187 }
2188
2189 out:
2190 cmnd->result = ScsiResult(host_status, scsi_status);
2191 lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
2192 }
2193
2194 /**
2195 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
2196 * @phba: The Hba for which this call is being executed.
2197 * @pIocbIn: The command IOCBQ for the scsi cmnd.
2198 * @pIocbOut: The response IOCBQ for the scsi cmnd.
2199 *
2200 * This routine assigns the scsi command result by examining the response IOCB
2201 * status field. It also handles the QUEUE FULL condition by ramping down the
2202 * device queue depth.
2203 **/
2204 static void
2205 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
2206 struct lpfc_iocbq *pIocbOut)
2207 {
2208 struct lpfc_scsi_buf *lpfc_cmd =
2209 (struct lpfc_scsi_buf *) pIocbIn->context1;
2210 struct lpfc_vport *vport = pIocbIn->vport;
2211 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
2212 struct lpfc_nodelist *pnode = rdata->pnode;
2213 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2214 int result;
2215 struct scsi_device *tmp_sdev;
2216 int depth;
2217 unsigned long flags;
2218 struct lpfc_fast_path_event *fast_path_evt;
2219 struct Scsi_Host *shost = cmd->device->host;
2220 uint32_t queue_depth, scsi_id;
2221
2222 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
2223 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
2224 if (pnode && NLP_CHK_NODE_ACT(pnode))
2225 atomic_dec(&pnode->cmd_pending);
2226
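/*
 * Normalize the completion status first: driver-detected local rejects are
 * reported as IOSTAT_DRIVER_REJECT and out-of-range values collapse to
 * IOSTAT_DEFAULT before the switch below dispatches on the status.
 */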
2227 if (lpfc_cmd->status) {
2228 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
2229 (lpfc_cmd->result & IOERR_DRVR_MASK))
2230 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
2231 else if (lpfc_cmd->status >= IOSTAT_CNT)
2232 lpfc_cmd->status = IOSTAT_DEFAULT;
2233
2234 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2235 "9030 FCP cmd x%x failed <%d/%d> "
2236 "status: x%x result: x%x Data: x%x x%x\n",
2237 cmd->cmnd[0],
2238 cmd->device ? cmd->device->id : 0xffff,
2239 cmd->device ? cmd->device->lun : 0xffff,
2240 lpfc_cmd->status, lpfc_cmd->result,
2241 pIocbOut->iocb.ulpContext,
2242 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
2243
2244 switch (lpfc_cmd->status) {
2245 case IOSTAT_FCP_RSP_ERROR:
2246 /* Call FCP RSP handler to determine result */
2247 lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
2248 break;
2249 case IOSTAT_NPORT_BSY:
2250 case IOSTAT_FABRIC_BSY:
2251 cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
2252 fast_path_evt = lpfc_alloc_fast_evt(phba);
2253 if (!fast_path_evt)
2254 break;
2255 fast_path_evt->un.fabric_evt.event_type =
2256 FC_REG_FABRIC_EVENT;
2257 fast_path_evt->un.fabric_evt.subcategory =
2258 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
2259 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
2260 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
2261 memcpy(&fast_path_evt->un.fabric_evt.wwpn,
2262 &pnode->nlp_portname,
2263 sizeof(struct lpfc_name));
2264 memcpy(&fast_path_evt->un.fabric_evt.wwnn,
2265 &pnode->nlp_nodename,
2266 sizeof(struct lpfc_name));
2267 }
2268 fast_path_evt->vport = vport;
2269 fast_path_evt->work_evt.evt =
2270 LPFC_EVT_FASTPATH_MGMT_EVT;
2271 spin_lock_irqsave(&phba->hbalock, flags);
2272 list_add_tail(&fast_path_evt->work_evt.evt_listp,
2273 &phba->work_list);
2274 spin_unlock_irqrestore(&phba->hbalock, flags);
2275 lpfc_worker_wake_up(phba);
2276 break;
2277 case IOSTAT_LOCAL_REJECT:
2278 if (lpfc_cmd->result == IOERR_INVALID_RPI ||
2279 lpfc_cmd->result == IOERR_NO_RESOURCES ||
2280 lpfc_cmd->result == IOERR_ABORT_REQUESTED) {
2281 cmd->result = ScsiResult(DID_REQUEUE, 0);
2282 break;
2283 }
2284
2285 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
2286 lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
2287 pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
2288 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
2289 /*
2290 * This is a response for a BG enabled
2291 * cmd. Parse BG error
2292 */
2293 lpfc_parse_bg_err(phba, lpfc_cmd,
2294 pIocbOut);
2295 break;
2296 } else {
2297 lpfc_printf_vlog(vport, KERN_WARNING,
2298 LOG_BG,
2299 "9031 non-zero BGSTAT "
2300 "on unprotected cmd\n");
2301 }
2302 }
2303
2304 /* else: fall through */
2305 default:
2306 cmd->result = ScsiResult(DID_ERROR, 0);
2307 break;
2308 }
2309
2310 if (!pnode || !NLP_CHK_NODE_ACT(pnode)
2311 || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
2312 cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
2313 SAM_STAT_BUSY);
2314 } else {
2315 cmd->result = ScsiResult(DID_OK, 0);
2316 }
2317
2318 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
2319 uint32_t *lp = (uint32_t *)cmd->sense_buffer;
2320
2321 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2322 "0710 Iodone <%d/%d> cmd %p, error "
2323 "x%x SNS x%x x%x Data: x%x x%x\n",
2324 cmd->device->id, cmd->device->lun, cmd,
2325 cmd->result, *lp, *(lp + 3), cmd->retries,
2326 scsi_get_resid(cmd));
2327 }
2328
2329 lpfc_update_stats(phba, lpfc_cmd);
2330 result = cmd->result;
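/*
 * If this command exceeded cfg_max_scsicmpl_time, clamp the target queue
 * depth down to the number of commands currently outstanding (but not
 * below LPFC_MIN_TGT_QDEPTH); only READ_10/WRITE_10 trigger the clamp.
 */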
2331 if (vport->cfg_max_scsicmpl_time &&
2332 time_after(jiffies, lpfc_cmd->start_time +
2333 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
2334 spin_lock_irqsave(shost->host_lock, flags);
2335 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
2336 if (pnode->cmd_qdepth >
2337 atomic_read(&pnode->cmd_pending) &&
2338 (atomic_read(&pnode->cmd_pending) >
2339 LPFC_MIN_TGT_QDEPTH) &&
2340 ((cmd->cmnd[0] == READ_10) ||
2341 (cmd->cmnd[0] == WRITE_10)))
2342 pnode->cmd_qdepth =
2343 atomic_read(&pnode->cmd_pending);
2344
2345 pnode->last_change_time = jiffies;
2346 }
2347 spin_unlock_irqrestore(shost->host_lock, flags);
2348 } else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
2349 if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
2350 time_after(jiffies, pnode->last_change_time +
2351 msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
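/*
 * Otherwise, once LPFC_TGTQ_INTERVAL has passed since the last change,
 * ramp the target queue depth back up by LPFC_TGTQ_RAMPUP_PCENT percent,
 * capped at LPFC_MAX_TGT_QDEPTH.
 */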
2352 spin_lock_irqsave(shost->host_lock, flags);
2353 pnode->cmd_qdepth += pnode->cmd_qdepth *
2354 LPFC_TGTQ_RAMPUP_PCENT / 100;
2355 if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
2356 pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
2357 pnode->last_change_time = jiffies;
2358 spin_unlock_irqrestore(shost->host_lock, flags);
2359 }
2360 }
2361
2362 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
2363
2364 /* The sdev is not guaranteed to be valid post scsi_done upcall. */
2365 queue_depth = cmd->device->queue_depth;
2366 scsi_id = cmd->device->id;
2367 cmd->scsi_done(cmd);
2368
2369 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
2370 /*
2371 * If there is a thread waiting for command completion
2372 * wake up the thread.
2373 */
2374 spin_lock_irqsave(shost->host_lock, flags);
2375 lpfc_cmd->pCmd = NULL;
2376 if (lpfc_cmd->waitq)
2377 wake_up(lpfc_cmd->waitq);
2378 spin_unlock_irqrestore(shost->host_lock, flags);
2379 lpfc_release_scsi_buf(phba, lpfc_cmd);
2380 return;
2381 }
2382
2383 if (!result)
2384 lpfc_rampup_queue_depth(vport, queue_depth);
2385
2386 /*
2387 * Check for queue full. If the lun is reporting queue full, then
2388 * back off the lun queue depth to prevent target overloads.
2389 */
2390 if (result == SAM_STAT_TASK_SET_FULL && pnode &&
2391 NLP_CHK_NODE_ACT(pnode)) {
2392 shost_for_each_device(tmp_sdev, shost) {
2393 if (tmp_sdev->id != scsi_id)
2394 continue;
2395 depth = scsi_track_queue_full(tmp_sdev,
2396 tmp_sdev->queue_depth-1);
2397 if (depth <= 0)
2398 continue;
2399 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2400 "0711 detected queue full - lun queue "
2401 "depth adjusted to %d.\n", depth);
2402 lpfc_send_sdev_queuedepth_change_event(phba, vport,
2403 pnode,
2404 tmp_sdev->lun,
2405 depth+1, depth);
2406 }
2407 }
2408
2409 /*
2410 * If there is a thread waiting for command completion
2411 * wake up the thread.
2412 */
2413 spin_lock_irqsave(shost->host_lock, flags);
2414 lpfc_cmd->pCmd = NULL;
2415 if (lpfc_cmd->waitq)
2416 wake_up(lpfc_cmd->waitq);
2417 spin_unlock_irqrestore(shost->host_lock, flags);
2418
2419 lpfc_release_scsi_buf(phba, lpfc_cmd);
2420 }
2421
2422 /**
2423 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
2424 * @data: A pointer to the immediate command data portion of the IOCB.
2425 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
2426 *
2427 * The routine copies the entire FCP command from @fcp_cmnd to @data while
2428 * byte swapping the data to big endian format for transmission on the wire.
2429 **/
2430 static void
2431 lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
2432 {
2433 int i, j;
2434 for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
2435 i += sizeof(uint32_t), j++) {
2436 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
2437 }
2438 }
2439
2440 /**
2441 * lpfc_scsi_prep_cmnd - Routine to convert a scsi cmnd to an FCP information unit
2442 * @vport: The virtual port for which this call is being executed.
2443 * @lpfc_cmd: The scsi command which needs to send.
2444 * @pnode: Pointer to lpfc_nodelist.
2445 *
2446 * This routine initializes the fcp_cmnd and iocb data structures from the scsi
2447 * command for transfer to a device with the SLI3 interface spec.
2448 **/
2449 static void
2450 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2451 struct lpfc_nodelist *pnode)
2452 {
2453 struct lpfc_hba *phba = vport->phba;
2454 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2455 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2456 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2457 struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
2458 int datadir = scsi_cmnd->sc_data_direction;
2459 char tag[2];
2460
2461 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
2462 return;
2463
2464 lpfc_cmd->fcp_rsp->rspSnsLen = 0;
2465 /* clear task management bits */
2466 lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
2467
2468 int_to_scsilun(lpfc_cmd->pCmd->device->lun,
2469 &lpfc_cmd->fcp_cmnd->fcp_lun);
2470
2471 memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);
2472
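/* Map the midlayer's queue tag message onto the FCP task attribute */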
2473 if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
2474 switch (tag[0]) {
2475 case HEAD_OF_QUEUE_TAG:
2476 fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
2477 break;
2478 case ORDERED_QUEUE_TAG:
2479 fcp_cmnd->fcpCntl1 = ORDERED_Q;
2480 break;
2481 default:
2482 fcp_cmnd->fcpCntl1 = SIMPLE_Q;
2483 break;
2484 }
2485 } else
2486 fcp_cmnd->fcpCntl1 = 0;
2487
2488 /*
2489 * There are three possibilities here - use scatter-gather segment, use
2490 * the single mapping, or neither. Start the lpfc command prep by
2491 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
2492 * data bde entry.
2493 */
2494 if (scsi_sg_count(scsi_cmnd)) {
2495 if (datadir == DMA_TO_DEVICE) {
2496 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
2497 if (phba->sli_rev < LPFC_SLI_REV4) {
2498 iocb_cmd->un.fcpi.fcpi_parm = 0;
2499 iocb_cmd->ulpPU = 0;
2500 } else
2501 iocb_cmd->ulpPU = PARM_READ_CHECK;
2502 fcp_cmnd->fcpCntl3 = WRITE_DATA;
2503 phba->fc4OutputRequests++;
2504 } else {
2505 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
2506 iocb_cmd->ulpPU = PARM_READ_CHECK;
2507 fcp_cmnd->fcpCntl3 = READ_DATA;
2508 phba->fc4InputRequests++;
2509 }
2510 } else {
2511 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
2512 iocb_cmd->un.fcpi.fcpi_parm = 0;
2513 iocb_cmd->ulpPU = 0;
2514 fcp_cmnd->fcpCntl3 = 0;
2515 phba->fc4ControlRequests++;
2516 }
2517 if (phba->sli_rev == 3 &&
2518 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
2519 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
2520 /*
2521 * Finish initializing those IOCB fields that are independent
2522 * of the scsi_cmnd request_buffer
2523 */
2524 piocbq->iocb.ulpContext = pnode->nlp_rpi;
2525 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
2526 piocbq->iocb.ulpFCP2Rcvy = 1;
2527 else
2528 piocbq->iocb.ulpFCP2Rcvy = 0;
2529
2530 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
2531 piocbq->context1 = lpfc_cmd;
2532 piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
2533 piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
2534 piocbq->vport = vport;
2535 }
2536
2537 /**
2538 * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
2539 * @vport: The virtual port for which this call is being executed.
2540 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2541 * @lun: Logical unit number.
2542 * @task_mgmt_cmd: SCSI task management command.
2543 *
2544 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
2545 * for device with SLI-3 interface spec.
2546 *
2547 * Return codes:
2548 * 0 - Error
2549 * 1 - Success
2550 **/
2551 static int
2552 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
2553 struct lpfc_scsi_buf *lpfc_cmd,
2554 unsigned int lun,
2555 uint8_t task_mgmt_cmd)
2556 {
2557 struct lpfc_iocbq *piocbq;
2558 IOCB_t *piocb;
2559 struct fcp_cmnd *fcp_cmnd;
2560 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
2561 struct lpfc_nodelist *ndlp = rdata->pnode;
2562
2563 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2564 ndlp->nlp_state != NLP_STE_MAPPED_NODE)
2565 return 0;
2566
2567 piocbq = &(lpfc_cmd->cur_iocbq);
2568 piocbq->vport = vport;
2569
2570 piocb = &piocbq->iocb;
2571
2572 fcp_cmnd = lpfc_cmd->fcp_cmnd;
2573 /* Clear out any old data in the FCP command area */
2574 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2575 int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
2576 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
2577 if (vport->phba->sli_rev == 3 &&
2578 !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
2579 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
2580 piocb->ulpCommand = CMD_FCP_ICMND64_CR;
2581 piocb->ulpContext = ndlp->nlp_rpi;
2582 if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
2583 piocb->ulpFCP2Rcvy = 1;
2584 }
2585 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
2586
2587 /* ulpTimeout is only one byte */
2588 if (lpfc_cmd->timeout > 0xff) {
2589 /*
2590 * Do not timeout the command at the firmware level.
2591 * The driver will provide the timeout mechanism.
2592 */
2593 piocb->ulpTimeout = 0;
2594 } else
2595 piocb->ulpTimeout = lpfc_cmd->timeout;
2596
2597 if (vport->phba->sli_rev == LPFC_SLI_REV4)
2598 lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
2599
2600 return 1;
2601 }
2602
2603 /**
2604 * lpfc_scsi_api_table_setup - Set up scsi api function jump table
2605 * @phba: The hba struct for which this call is being executed.
2606 * @dev_grp: The HBA PCI-Device group number.
2607 *
2608 * This routine sets up the SCSI interface API function jump table in @phba
2609 * struct.
2610 * Returns: 0 - success, -ENODEV - failure.
2611 **/
2612 int
2613 lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
2614 {
2615
2616 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
2617 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
2618 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
2619
2620 switch (dev_grp) {
2621 case LPFC_PCI_DEV_LP:
2622 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
2623 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
2624 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
2625 break;
2626 case LPFC_PCI_DEV_OC:
2627 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
2628 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
2629 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
2630 break;
2631 default:
2632 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2633 "1418 Invalid HBA PCI-device group: 0x%x\n",
2634 dev_grp);
2635 return -ENODEV;
2636 break;
2637 }
2638 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
2639 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
2640 return 0;
2641 }
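/*
 * A minimal dispatch sketch (illustrative only): once this jump table is set
 * up, generic paths such as lpfc_queuecommand() below simply call the
 * lpfc_scsi_prep_dma_buf() wrapper defined earlier, which routes through
 * phba->lpfc_scsi_prep_dma_buf() to the _s3 or _s4 variant chosen here.
 */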
2642
2643 /**
2644 * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command
2645 * @phba: The Hba for which this call is being executed.
2646 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
2647 * @rspiocbq: Pointer to lpfc_iocbq data structure.
2648 *
2649 * This routine is the IOCB completion routine for the device reset and target
2650 * reset routines. It releases the scsi buffer associated with lpfc_cmd.
2651 **/
2652 static void
2653 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
2654 struct lpfc_iocbq *cmdiocbq,
2655 struct lpfc_iocbq *rspiocbq)
2656 {
2657 struct lpfc_scsi_buf *lpfc_cmd =
2658 (struct lpfc_scsi_buf *) cmdiocbq->context1;
2659 if (lpfc_cmd)
2660 lpfc_release_scsi_buf(phba, lpfc_cmd);
2661 return;
2662 }
2663
2664 /**
2665 * lpfc_info - Info entry point of scsi_host_template data structure
2666 * @host: The scsi host for which this call is being executed.
2667 *
2668 * This routine provides module information about hba.
2669 *
2670 * Return code:
2671 * Pointer to char - Success.
2672 **/
2673 const char *
2674 lpfc_info(struct Scsi_Host *host)
2675 {
2676 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
2677 struct lpfc_hba *phba = vport->phba;
2678 int len;
2679 static char lpfcinfobuf[384];
2680
2681 memset(lpfcinfobuf, 0, 384);
2682 if (phba && phba->pcidev) {
2683 strncpy(lpfcinfobuf, phba->ModelDesc, 256);
2684 len = strlen(lpfcinfobuf);
2685 snprintf(lpfcinfobuf + len,
2686 384-len,
2687 " on PCI bus %02x device %02x irq %d",
2688 phba->pcidev->bus->number,
2689 phba->pcidev->devfn,
2690 phba->pcidev->irq);
2691 len = strlen(lpfcinfobuf);
2692 if (phba->Port[0]) {
2693 snprintf(lpfcinfobuf + len,
2694 384-len,
2695 " port %s",
2696 phba->Port);
2697 }
2698 }
2699 return lpfcinfobuf;
2700 }
2701
2702 /**
2703 * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba
2704 * @phba: The Hba for which this call is being executed.
2705 *
2706 * This routine modifies fcp_poll_timer field of @phba by cfg_poll_tmo.
2707 * The default value of cfg_poll_tmo is 10 milliseconds.
2708 **/
2709 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
2710 {
2711 unsigned long poll_tmo_expires =
2712 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
2713
2714 if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
2715 mod_timer(&phba->fcp_poll_timer,
2716 poll_tmo_expires);
2717 }
2718
2719 /**
2720 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
2721 * @phba: The Hba for which this call is being executed.
2722 *
2723 * This routine starts the fcp_poll_timer of @phba.
2724 **/
2725 void lpfc_poll_start_timer(struct lpfc_hba * phba)
2726 {
2727 lpfc_poll_rearm_timer(phba);
2728 }
2729
2730 /**
2731 * lpfc_poll_timeout - Restart polling timer
2732 * @ptr: Map to lpfc_hba data structure pointer.
2733 *
2734 * This routine restarts the fcp_poll timer when FCP ring polling is enabled
2735 * and the FCP ring interrupt is disabled.
2736 **/
2737
2738 void lpfc_poll_timeout(unsigned long ptr)
2739 {
2740 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
2741
2742 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
2743 lpfc_sli_handle_fast_ring_event(phba,
2744 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
2745
2746 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
2747 lpfc_poll_rearm_timer(phba);
2748 }
2749 }
2750
2751 /**
2752 * lpfc_queuecommand - scsi_host_template queuecommand entry point
2753 * @cmnd: Pointer to scsi_cmnd data structure.
2754 * @done: Pointer to done routine.
2755 *
2756 * The driver registers this routine with the scsi midlayer to submit @cmnd.
2757 * This routine prepares an IOCB from the scsi command and provides it to the
2758 * firmware. The @done callback is invoked after the driver finishes processing.
2759 *
2760 * Return value :
2761 * 0 - Success
2762 * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
2763 **/
2764 static int
2765 lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2766 {
2767 struct Scsi_Host *shost = cmnd->device->host;
2768 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2769 struct lpfc_hba *phba = vport->phba;
2770 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
2771 struct lpfc_nodelist *ndlp;
2772 struct lpfc_scsi_buf *lpfc_cmd;
2773 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
2774 int err;
2775
2776 err = fc_remote_port_chkready(rport);
2777 if (err) {
2778 cmnd->result = err;
2779 goto out_fail_command;
2780 }
2781 ndlp = rdata->pnode;
2782
2783 if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
2784 scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
2785
2786 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2787 "9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
2788 " op:%02x str=%s without registering for"
2789 " BlockGuard - Rejecting command\n",
2790 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2791 dif_op_str[scsi_get_prot_op(cmnd)]);
2792 goto out_fail_command;
2793 }
2794
2795 /*
2796 * Catch race where our node has transitioned, but the
2797 * transport is still transitioning.
2798 */
2799 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
2800 cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
2801 goto out_fail_command;
2802 }
2803 if (vport->cfg_max_scsicmpl_time &&
2804 (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth))
2805 goto out_host_busy;
2806
2807 lpfc_cmd = lpfc_get_scsi_buf(phba);
2808 if (lpfc_cmd == NULL) {
2809 lpfc_rampdown_queue_depth(phba);
2810
2811 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2812 "0707 driver's buffer pool is empty, "
2813 "IO busied\n");
2814 goto out_host_busy;
2815 }
2816
2817 /*
2818 * Store the midlayer's command structure for the completion phase
2819 * and complete the command initialization.
2820 */
2821 lpfc_cmd->pCmd = cmnd;
2822 lpfc_cmd->rdata = rdata;
2823 lpfc_cmd->timeout = 0;
2824 lpfc_cmd->start_time = jiffies;
2825 cmnd->host_scribble = (unsigned char *)lpfc_cmd;
2826 cmnd->scsi_done = done;
2827
2828 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
2829 if (vport->phba->cfg_enable_bg) {
2830 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2831 "9033 BLKGRD: rcvd protected cmd:%02x op:%02x "
2832 "str=%s\n",
2833 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2834 dif_op_str[scsi_get_prot_op(cmnd)]);
2835 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2836 "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x "
2837 "%02x %02x %02x %02x %02x\n",
2838 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
2839 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
2840 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
2841 cmnd->cmnd[9]);
2842 if (cmnd->cmnd[0] == READ_10)
2843 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2844 "9035 BLKGRD: READ @ sector %llu, "
2845 "count %u\n",
2846 (unsigned long long)scsi_get_lba(cmnd),
2847 blk_rq_sectors(cmnd->request));
2848 else if (cmnd->cmnd[0] == WRITE_10)
2849 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2850 "9036 BLKGRD: WRITE @ sector %llu, "
2851 "count %u cmd=%p\n",
2852 (unsigned long long)scsi_get_lba(cmnd),
2853 blk_rq_sectors(cmnd->request),
2854 cmnd);
2855 }
2856
2857 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
2858 } else {
2859 if (vport->phba->cfg_enable_bg) {
2860 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2861 "9038 BLKGRD: rcvd unprotected cmd:"
2862 "%02x op:%02x str=%s\n",
2863 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2864 dif_op_str[scsi_get_prot_op(cmnd)]);
2865 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2866 "9039 BLKGRD: CDB: %02x %02x %02x "
2867 "%02x %02x %02x %02x %02x %02x %02x\n",
2868 cmnd->cmnd[0], cmnd->cmnd[1],
2869 cmnd->cmnd[2], cmnd->cmnd[3],
2870 cmnd->cmnd[4], cmnd->cmnd[5],
2871 cmnd->cmnd[6], cmnd->cmnd[7],
2872 cmnd->cmnd[8], cmnd->cmnd[9]);
2873 if (cmnd->cmnd[0] == READ_10)
2874 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2875 "9040 dbg: READ @ sector %llu, "
2876 "count %u\n",
2877 (unsigned long long)scsi_get_lba(cmnd),
2878 blk_rq_sectors(cmnd->request));
2879 else if (cmnd->cmnd[0] == WRITE_10)
2880 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2881 "9041 dbg: WRITE @ sector %llu, "
2882 "count %u cmd=%p\n",
2883 (unsigned long long)scsi_get_lba(cmnd),
2884 blk_rq_sectors(cmnd->request), cmnd);
2885 else
2886 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2887 "9042 dbg: parser not implemented\n");
2888 }
2889 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
2890 }
2891
2892 if (err)
2893 goto out_host_busy_free_buf;
2894
2895 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
2896
2897 atomic_inc(&ndlp->cmd_pending);
2898 err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
2899 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
2900 if (err) {
2901 atomic_dec(&ndlp->cmd_pending);
2902 goto out_host_busy_free_buf;
2903 }
2904 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
2905 spin_unlock(shost->host_lock);
2906 lpfc_sli_handle_fast_ring_event(phba,
2907 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
2908
2909 spin_lock(shost->host_lock);
2910 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
2911 lpfc_poll_rearm_timer(phba);
2912 }
2913
2914 return 0;
2915
2916 out_host_busy_free_buf:
2917 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
2918 lpfc_release_scsi_buf(phba, lpfc_cmd);
2919 out_host_busy:
2920 return SCSI_MLQUEUE_HOST_BUSY;
2921
2922 out_fail_command:
2923 done(cmnd);
2924 return 0;
2925 }
2926
2927 /**
2928 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
2929 * @cmnd: Pointer to scsi_cmnd data structure.
2930 *
2931 * This routine aborts @cmnd pending in base driver.
2932 *
2933 * Return code :
2934 * 0x2003 - Error
2935 * 0x2002 - Success
2936 **/
2937 static int
2938 lpfc_abort_handler(struct scsi_cmnd *cmnd)
2939 {
2940 struct Scsi_Host *shost = cmnd->device->host;
2941 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2942 struct lpfc_hba *phba = vport->phba;
2943 struct lpfc_iocbq *iocb;
2944 struct lpfc_iocbq *abtsiocb;
2945 struct lpfc_scsi_buf *lpfc_cmd;
2946 IOCB_t *cmd, *icmd;
2947 int ret = SUCCESS;
2948 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
2949
2950 fc_block_scsi_eh(cmnd);
2951 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
2952 BUG_ON(!lpfc_cmd);
2953
2954 /*
2955 * If pCmd field of the corresponding lpfc_scsi_buf structure
2956 * points to a different SCSI command, then the driver has
2957 * already completed this command, but the midlayer did not
2958 * see the completion before the eh fired. Just return
2959 * SUCCESS.
2960 */
2961 iocb = &lpfc_cmd->cur_iocbq;
2962 if (lpfc_cmd->pCmd != cmnd)
2963 goto out;
2964
2965 BUG_ON(iocb->context1 != lpfc_cmd);
2966
2967 abtsiocb = lpfc_sli_get_iocbq(phba);
2968 if (abtsiocb == NULL) {
2969 ret = FAILED;
2970 goto out;
2971 }
2972
2973 /*
2974 * The scsi command cannot be in the txq, and it is in flight because
2975 * pCmd is still pointing at the SCSI command we have to abort. There
2976 * is no need to search the txcmplq. Just send an abort to the FW.
2977 */
2978
2979 cmd = &iocb->iocb;
2980 icmd = &abtsiocb->iocb;
2981 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
2982 icmd->un.acxri.abortContextTag = cmd->ulpContext;
2983 if (phba->sli_rev == LPFC_SLI_REV4)
2984 icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
2985 else
2986 icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
2987
2988 icmd->ulpLe = 1;
2989 icmd->ulpClass = cmd->ulpClass;
2990
2991 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
2992 abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
2993
2994 if (lpfc_is_link_up(phba))
2995 icmd->ulpCommand = CMD_ABORT_XRI_CN;
2996 else
2997 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
2998
2999 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
3000 abtsiocb->vport = vport;
3001 if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
3002 IOCB_ERROR) {
3003 lpfc_sli_release_iocbq(phba, abtsiocb);
3004 ret = FAILED;
3005 goto out;
3006 }
3007
3008 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
3009 lpfc_sli_handle_fast_ring_event(phba,
3010 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
3011
3012 lpfc_cmd->waitq = &waitq;
3013 /* Wait for abort to complete */
3014 wait_event_timeout(waitq,
3015 (lpfc_cmd->pCmd != cmnd),
3016 (2*vport->cfg_devloss_tmo*HZ));
3017
3018 spin_lock_irq(shost->host_lock);
3019 lpfc_cmd->waitq = NULL;
3020 spin_unlock_irq(shost->host_lock);
3021
3022 if (lpfc_cmd->pCmd == cmnd) {
3023 ret = FAILED;
3024 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3025 "0748 abort handler timed out waiting "
3026 "for abort to complete: ret %#x, ID %d, "
3027 "LUN %d, snum %#lx\n",
3028 ret, cmnd->device->id, cmnd->device->lun,
3029 cmnd->serial_number);
3030 }
3031
3032 out:
3033 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3034 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
3035 "LUN %d snum %#lx\n", ret, cmnd->device->id,
3036 cmnd->device->lun, cmnd->serial_number);
3037 return ret;
3038 }
3039
3040 static char *
3041 lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
3042 {
3043 switch (task_mgmt_cmd) {
3044 case FCP_ABORT_TASK_SET:
3045 return "ABORT_TASK_SET";
3046 case FCP_CLEAR_TASK_SET:
3047 return "FCP_CLEAR_TASK_SET";
3048 case FCP_BUS_RESET:
3049 return "FCP_BUS_RESET";
3050 case FCP_LUN_RESET:
3051 return "FCP_LUN_RESET";
3052 case FCP_TARGET_RESET:
3053 return "FCP_TARGET_RESET";
3054 case FCP_CLEAR_ACA:
3055 return "FCP_CLEAR_ACA";
3056 case FCP_TERMINATE_TASK:
3057 return "FCP_TERMINATE_TASK";
3058 default:
3059 return "unknown";
3060 }
3061 }
3062
3063 /**
3064 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
3065 * @vport: The virtual port for which this call is being executed.
3066 * @rdata: Pointer to remote port local data
3067 * @tgt_id: Target ID of remote device.
3068 * @lun_id: Lun number for the TMF
3069 * @task_mgmt_cmd: type of TMF to send
3070 *
3071 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
3072 * a remote port.
3073 *
3074 * Return Code:
3075 * 0x2003 - Error
3076 * 0x2002 - Success.
3077 **/
3078 static int
3079 lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
3080 unsigned tgt_id, unsigned int lun_id,
3081 uint8_t task_mgmt_cmd)
3082 {
3083 struct lpfc_hba *phba = vport->phba;
3084 struct lpfc_scsi_buf *lpfc_cmd;
3085 struct lpfc_iocbq *iocbq;
3086 struct lpfc_iocbq *iocbqrsp;
3087 int ret;
3088 int status;
3089
3090 if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
3091 return FAILED;
3092
3093 lpfc_cmd = lpfc_get_scsi_buf(phba);
3094 if (lpfc_cmd == NULL)
3095 return FAILED;
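/* Allow the TMF up to 60 seconds; this bounds the issue_iocb_wait below */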
3096 lpfc_cmd->timeout = 60;
3097 lpfc_cmd->rdata = rdata;
3098
3099 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
3100 task_mgmt_cmd);
3101 if (!status) {
3102 lpfc_release_scsi_buf(phba, lpfc_cmd);
3103 return FAILED;
3104 }
3105
3106 iocbq = &lpfc_cmd->cur_iocbq;
3107 iocbqrsp = lpfc_sli_get_iocbq(phba);
3108 if (iocbqrsp == NULL) {
3109 lpfc_release_scsi_buf(phba, lpfc_cmd);
3110 return FAILED;
3111 }
3112
3113 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3114 "0702 Issue %s to TGT %d LUN %d "
3115 "rpi x%x nlp_flag x%x\n",
3116 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
3117 rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
3118
3119 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
3120 iocbq, iocbqrsp, lpfc_cmd->timeout);
3121 if (status != IOCB_SUCCESS) {
3122 if (status == IOCB_TIMEDOUT) {
3123 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
3124 ret = TIMEOUT_ERROR;
3125 } else
3126 ret = FAILED;
3127 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
3128 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3129 "0727 TMF %s to TGT %d LUN %d failed (%d, %d)\n",
3130 lpfc_taskmgmt_name(task_mgmt_cmd),
3131 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
3132 iocbqrsp->iocb.un.ulpWord[4]);
3133 } else
3134 ret = SUCCESS;
3135
3136 lpfc_sli_release_iocbq(phba, iocbqrsp);
3137
3138 if (ret != TIMEOUT_ERROR)
3139 lpfc_release_scsi_buf(phba, lpfc_cmd);
3140
3141 return ret;
3142 }
3143
3144 /**
3145 * lpfc_chk_tgt_mapped - Wait for the scsi target (rport) to become mapped
3146 * @vport: The virtual port to check on
3147 * @cmnd: Pointer to scsi_cmnd data structure.
3148 *
3149 * This routine delays until the scsi target (aka rport) for the
3150 * command exists (is present and logged in) or we declare it non-existent.
3151 *
3152 * Return code :
3153 * 0x2003 - Error
3154 * 0x2002 - Success
3155 **/
3156 static int
3157 lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
3158 {
3159 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
3160 struct lpfc_nodelist *pnode;
3161 unsigned long later;
3162
3163 if (!rdata) {
3164 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3165 "0797 Tgt Map rport failure: rdata x%p\n", rdata);
3166 return FAILED;
3167 }
3168 pnode = rdata->pnode;
3169 /*
3170 * If target is not in a MAPPED state, delay until
3171 * target is rediscovered or devloss timeout expires.
3172 */
3173 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
3174 while (time_after(later, jiffies)) {
3175 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3176 return FAILED;
3177 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
3178 return SUCCESS;
3179 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
3180 rdata = cmnd->device->hostdata;
3181 if (!rdata)
3182 return FAILED;
3183 pnode = rdata->pnode;
3184 }
3185 if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
3186 (pnode->nlp_state != NLP_STE_MAPPED_NODE))
3187 return FAILED;
3188 return SUCCESS;
3189 }
3190
3191 /**
3192 * lpfc_reset_flush_io_context - Flush orphaned i/o contexts after a reset
3193 * @vport: The virtual port (scsi_host) for the flush context
3194 * @tgt_id: If aborting by Target context - specifies the target id
3195 * @lun_id: If aborting by Lun context - specifies the lun id
3196 * @context: specifies the context level to flush at.
3197 *
3198 * After a reset condition via TMF, we need to flush orphaned i/o
3199 * contexts from the adapter. This routine aborts any contexts
3200 * outstanding, then waits for their completions. The wait is
3201 * bounded by devloss_tmo though.
3202 *
3203 * Return code :
3204 * 0x2003 - Error
3205 * 0x2002 - Success
3206 **/
3207 static int
3208 lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
3209 uint64_t lun_id, lpfc_ctx_cmd context)
3210 {
3211 struct lpfc_hba *phba = vport->phba;
3212 unsigned long later;
3213 int cnt;
3214
3215 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
3216 if (cnt)
3217 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
3218 tgt_id, lun_id, context);
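/* Poll every 20ms, for at most 2 * devloss_tmo, for the aborted i/o to finish */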
3219 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
3220 while (time_after(later, jiffies) && cnt) {
3221 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
3222 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
3223 }
3224 if (cnt) {
3225 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3226 "0724 I/O flush failure for context %s : cnt x%x\n",
3227 ((context == LPFC_CTX_LUN) ? "LUN" :
3228 ((context == LPFC_CTX_TGT) ? "TGT" :
3229 ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
3230 cnt);
3231 return FAILED;
3232 }
3233 return SUCCESS;
3234 }
3235
3236 /**
3237 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
3238 * @cmnd: Pointer to scsi_cmnd data structure.
3239 *
3240 * This routine does a device reset by sending a LUN_RESET task management
3241 * command.
3242 *
3243 * Return code :
3244 * 0x2003 - Error
3245 * 0x2002 - Success
3246 **/
3247 static int
3248 lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
3249 {
3250 struct Scsi_Host *shost = cmnd->device->host;
3251 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3252 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
3253 struct lpfc_nodelist *pnode;
3254 unsigned tgt_id = cmnd->device->id;
3255 unsigned int lun_id = cmnd->device->lun;
3256 struct lpfc_scsi_event_header scsi_event;
3257 int status;
3258
3259 if (!rdata) {
3260 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3261 "0798 Device Reset rport failure: rdata x%p\n", rdata);
3262 return FAILED;
3263 }
3264 pnode = rdata->pnode;
3265 fc_block_scsi_eh(cmnd);
3266
3267 status = lpfc_chk_tgt_mapped(vport, cmnd);
3268 if (status == FAILED) {
3269 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3270 "0721 Device Reset rport failure: rdata x%p\n", rdata);
3271 return FAILED;
3272 }
3273
3274 scsi_event.event_type = FC_REG_SCSI_EVENT;
3275 scsi_event.subcategory = LPFC_EVENT_LUNRESET;
3276 scsi_event.lun = lun_id;
3277 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
3278 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
3279
3280 fc_host_post_vendor_event(shost, fc_get_event_number(),
3281 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
3282
3283 status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
3284 FCP_LUN_RESET);
3285
3286 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3287 "0713 SCSI layer issued Device Reset (%d, %d) "
3288 "return x%x\n", tgt_id, lun_id, status);
3289
3290 /*
3291 * We have to clean up i/o, as they may be orphaned by the TMF;
3292 * or if the TMF failed, they may be in an indeterminate state.
3293 * So, continue on.
3294 * We will report success if all the i/o aborts successfully.
3295 */
3296 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
3297 LPFC_CTX_LUN);
3298 return status;
3299 }
3300
3301 /**
3302 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
3303 * @cmnd: Pointer to scsi_cmnd data structure.
3304 *
3305 * This routine does a target reset by sending a TARGET_RESET task management
3306 * command.
3307 *
3308 * Return code :
3309 * 0x2003 - Error
3310 * 0x2002 - Success
3311 **/
3312 static int
3313 lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
3314 {
3315 struct Scsi_Host *shost = cmnd->device->host;
3316 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3317 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
3318 struct lpfc_nodelist *pnode;
3319 unsigned tgt_id = cmnd->device->id;
3320 unsigned int lun_id = cmnd->device->lun;
3321 struct lpfc_scsi_event_header scsi_event;
3322 int status;
3323
3324 if (!rdata) {
3325 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3326 "0799 Target Reset rport failure: rdata x%p\n", rdata);
3327 return FAILED;
3328 }
3329 pnode = rdata->pnode;
3330 fc_block_scsi_eh(cmnd);
3331
3332 status = lpfc_chk_tgt_mapped(vport, cmnd);
3333 if (status == FAILED) {
3334 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3335 "0722 Target Reset rport failure: rdata x%p\n", rdata);
3336 return FAILED;
3337 }
3338
3339 scsi_event.event_type = FC_REG_SCSI_EVENT;
3340 scsi_event.subcategory = LPFC_EVENT_TGTRESET;
3341 scsi_event.lun = 0;
3342 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
3343 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
3344
3345 fc_host_post_vendor_event(shost, fc_get_event_number(),
3346 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
3347
3348 status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
3349 FCP_TARGET_RESET);
3350
3351 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3352 "0723 SCSI layer issued Target Reset (%d, %d) "
3353 "return x%x\n", tgt_id, lun_id, status);
3354
3355 /*
3356 * We have to clean up i/o, as they may be orphaned by the TMF;
3357 * or if the TMF failed, they may be in an indeterminate state.
3358 * So, continue on.
3359 * We will report success if all the i/o aborts successfully.
3360 */
3361 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
3362 LPFC_CTX_TGT);
3363 return status;
3364 }
3365
3366 /**
3367 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
3368 * @cmnd: Pointer to scsi_cmnd data structure.
3369 *
3370 * This routine does a target reset on all targets of @cmnd->device->host.
3371 * This emulates Parallel SCSI Bus Reset Semantics.
3372 *
3373 * Return code :
3374 * 0x2003 - Error
3375 * 0x2002 - Success
3376 **/
3377 static int
3378 lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
3379 {
3380 struct Scsi_Host *shost = cmnd->device->host;
3381 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3382 struct lpfc_nodelist *ndlp = NULL;
3383 struct lpfc_scsi_event_header scsi_event;
3384 int match;
3385 int ret = SUCCESS, status, i;
3386
3387 scsi_event.event_type = FC_REG_SCSI_EVENT;
3388 scsi_event.subcategory = LPFC_EVENT_BUSRESET;
3389 scsi_event.lun = 0;
3390 memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
3391 memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
3392
3393 fc_host_post_vendor_event(shost, fc_get_event_number(),
3394 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
3395
3396 fc_block_scsi_eh(cmnd);
3397
3398 /*
3399 * Since the driver manages a single bus device, reset all
3400 * targets known to the driver. Should any target reset
3401 * fail, this routine returns failure to the midlayer.
3402 */
3403 for (i = 0; i < LPFC_MAX_TARGET; i++) {
3404 /* Search for mapped node by target ID */
3405 match = 0;
3406 spin_lock_irq(shost->host_lock);
3407 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
3408 if (!NLP_CHK_NODE_ACT(ndlp))
3409 continue;
3410 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
3411 ndlp->nlp_sid == i &&
3412 ndlp->rport) {
3413 match = 1;
3414 break;
3415 }
3416 }
3417 spin_unlock_irq(shost->host_lock);
3418 if (!match)
3419 continue;
3420
3421 status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
3422 i, 0, FCP_TARGET_RESET);
3423
3424 if (status != SUCCESS) {
3425 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3426 "0700 Bus Reset on target %d failed\n",
3427 i);
3428 ret = FAILED;
3429 }
3430 }
3431 /*
3432 * We have to clean up the I/O, as it may have been orphaned by the TMFs
3433 * above, or, if any of the TMFs failed, it may be left in an
3434 * indeterminate state.
3435 * We will report success if all the I/O aborts successfully.
3436 */
3437
3438 status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
3439 if (status != SUCCESS)
3440 ret = FAILED;
3441
3442 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3443 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
3444 return ret;
3445 }
3446
3447 /**
3448 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
3449 * @sdev: Pointer to scsi_device.
3450 *
3451 * This routine populates lun_queue_depth + 2 scsi_bufs into this host's
3452 * globally available list of scsi buffers. It also ensures that no more
3453 * scsi buffers are allocated than the HBA limit conveyed to the midlayer.
3454 * This list of scsi buffers exists for the lifetime of the driver.
3455 *
3456 * Return codes:
3457 * non-0 - Error
3458 * 0 - Success
3459 **/
3460 static int
3461 lpfc_slave_alloc(struct scsi_device *sdev)
3462 {
3463 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
3464 struct lpfc_hba *phba = vport->phba;
3465 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
3466 uint32_t total = 0;
3467 uint32_t num_to_alloc = 0;
3468 int num_allocated = 0;
3469
3470 if (!rport || fc_remote_port_chkready(rport))
3471 return -ENXIO;
3472
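/* Cache the rport's driver data in the scsi_device for quick lookup in the I/O path. */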
3473 sdev->hostdata = rport->dd_data;
3474
3475 /*
3476 * Populate lun_queue_depth + 2 scsi_bufs into this host's globally
3477 * available list of scsi buffers. Don't allocate more than the
3478 * HBA limit conveyed to the midlayer via the host structure. The
3479 * formula accounts for the lun_queue_depth, the error handlers, and
3480 * one extra. This list of scsi bufs exists for the lifetime of the driver.
3481 */
3482 total = phba->total_scsi_bufs;
3483 num_to_alloc = vport->cfg_lun_queue_depth + 2;
3484
3485 /* Allow some exchanges to be available always to complete discovery */
3486 if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
3487 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3488 "0704 At limitation of %d preallocated "
3489 "command buffers\n", total);
3490 return 0;
3491 /* Allow some exchanges to be available always to complete discovery */
3492 } else if (total + num_to_alloc >
3493 phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
3494 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3495 "0705 Allocation request of %d "
3496 "command buffers will exceed max of %d. "
3497 "Reducing allocation request to %d.\n",
3498 num_to_alloc, phba->cfg_hba_queue_depth,
3499 (phba->cfg_hba_queue_depth - total));
3500 num_to_alloc = phba->cfg_hba_queue_depth - total;
3501 }
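/* lpfc_new_scsi_buf may return fewer buffers than requested if resources are exhausted; only count what was actually allocated. */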
3502 num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
3503 if (num_to_alloc != num_allocated) {
3504 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3505 "0708 Allocation request of %d "
3506 "command buffers did not succeed. "
3507 "Allocated %d buffers.\n",
3508 num_to_alloc, num_allocated);
3509 }
3510 if (num_allocated > 0)
3511 phba->total_scsi_bufs += num_allocated;
3512 return 0;
3513 }
3514
3515 /**
3516 * lpfc_slave_configure - scsi_host_template slave_configure entry point
3517 * @sdev: Pointer to scsi_device.
3518 *
3519 * This routine configures the following items:
3520 * - Tagged command queuing for @sdev, if the device supports it.
3521 * - Dev loss timeout value of the fc_rport.
3522 * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
3523 *
3524 * Return codes:
3525 * 0 - Success
3526 **/
3527 static int
3528 lpfc_slave_configure(struct scsi_device *sdev)
3529 {
3530 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
3531 struct lpfc_hba *phba = vport->phba;
3532 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
3533
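/* Enable tagged command queuing at the configured LUN queue depth when the device supports it; otherwise set the depth for untagged operation. */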
3534 if (sdev->tagged_supported)
3535 scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
3536 else
3537 scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);
3538
3539 /*
3540 * Initialize the fc transport attributes for the target
3541 * containing this scsi device. Also note that the driver's
3542 * target pointer is stored in the starget_data for the
3543 * driver's sysfs entry point functions.
3544 */
3545 rport->dev_loss_tmo = vport->cfg_devloss_tmo;
3546
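/* With FCP ring polling enabled, service any pending fast-path completions now and, if ring interrupts are disabled, re-arm the polling timer. */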
3547 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
3548 lpfc_sli_handle_fast_ring_event(phba,
3549 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
3550 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
3551 lpfc_poll_rearm_timer(phba);
3552 }
3553
3554 return 0;
3555 }
3556
3557 /**
3558 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
3559 * @sdev: Pointer to scsi_device.
3560 *
3561 * This routine sets the @sdev hostdata field to null.
3562 **/
3563 static void
3564 lpfc_slave_destroy(struct scsi_device *sdev)
3565 {
3566 sdev->hostdata = NULL;
3567 return;
3568 }
3569
3570
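/* SCSI host template used for physical HBA ports. */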
3571 struct scsi_host_template lpfc_template = {
3572 .module = THIS_MODULE,
3573 .name = LPFC_DRIVER_NAME,
3574 .info = lpfc_info,
3575 .queuecommand = lpfc_queuecommand,
3576 .eh_abort_handler = lpfc_abort_handler,
3577 .eh_device_reset_handler = lpfc_device_reset_handler,
3578 .eh_target_reset_handler = lpfc_target_reset_handler,
3579 .eh_bus_reset_handler = lpfc_bus_reset_handler,
3580 .slave_alloc = lpfc_slave_alloc,
3581 .slave_configure = lpfc_slave_configure,
3582 .slave_destroy = lpfc_slave_destroy,
3583 .scan_finished = lpfc_scan_finished,
3584 .this_id = -1,
3585 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
3586 .cmd_per_lun = LPFC_CMD_PER_LUN,
3587 .use_clustering = ENABLE_CLUSTERING,
3588 .shost_attrs = lpfc_hba_attrs,
3589 .max_sectors = 0xFFFF,
3590 .vendor_id = LPFC_NL_VENDOR_ID,
3591 .change_queue_depth = lpfc_change_queue_depth,
3592 };
3593
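/* SCSI host template used for NPIV virtual ports; it differs from lpfc_template only in its sysfs attribute set and the absence of a netlink vendor_id. */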
3594 struct scsi_host_template lpfc_vport_template = {
3595 .module = THIS_MODULE,
3596 .name = LPFC_DRIVER_NAME,
3597 .info = lpfc_info,
3598 .queuecommand = lpfc_queuecommand,
3599 .eh_abort_handler = lpfc_abort_handler,
3600 .eh_device_reset_handler = lpfc_device_reset_handler,
3601 .eh_target_reset_handler = lpfc_target_reset_handler,
3602 .eh_bus_reset_handler = lpfc_bus_reset_handler,
3603 .slave_alloc = lpfc_slave_alloc,
3604 .slave_configure = lpfc_slave_configure,
3605 .slave_destroy = lpfc_slave_destroy,
3606 .scan_finished = lpfc_scan_finished,
3607 .this_id = -1,
3608 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
3609 .cmd_per_lun = LPFC_CMD_PER_LUN,
3610 .use_clustering = ENABLE_CLUSTERING,
3611 .shost_attrs = lpfc_vport_attrs,
3612 .max_sectors = 0xFFFF,
3613 .change_queue_depth = lpfc_change_queue_depth,
3614 };