[SCSI] zfcp: Update message with input from review
[deliverable/linux.git] / drivers/s390/scsi/zfcp_fsf.c
1 /*
2 * zfcp device driver
3 *
4 * Implementation of FSF commands.
5 *
6 * Copyright IBM Corporation 2002, 2008
7 */
8
9 #include "zfcp_ext.h"
10
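/* Timer callback for an FSF request that timed out: reopen the adapter through error recovery. */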
11 static void zfcp_fsf_request_timeout_handler(unsigned long data)
12 {
13 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
14 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 62,
15 NULL);
16 }
17
18 static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
19 unsigned long timeout)
20 {
21 fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
22 fsf_req->timer.data = (unsigned long) fsf_req->adapter;
23 fsf_req->timer.expires = jiffies + timeout;
24 add_timer(&fsf_req->timer);
25 }
26
27 static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
28 {
29 BUG_ON(!fsf_req->erp_action);
30 fsf_req->timer.function = zfcp_erp_timeout_handler;
31 fsf_req->timer.data = (unsigned long) fsf_req->erp_action;
32 fsf_req->timer.expires = jiffies + 30 * HZ;
33 add_timer(&fsf_req->timer);
34 }
35
36 /* association between FSF command and FSF QTCB type */
37 static u32 fsf_qtcb_type[] = {
38 [FSF_QTCB_FCP_CMND] = FSF_IO_COMMAND,
39 [FSF_QTCB_ABORT_FCP_CMND] = FSF_SUPPORT_COMMAND,
40 [FSF_QTCB_OPEN_PORT_WITH_DID] = FSF_SUPPORT_COMMAND,
41 [FSF_QTCB_OPEN_LUN] = FSF_SUPPORT_COMMAND,
42 [FSF_QTCB_CLOSE_LUN] = FSF_SUPPORT_COMMAND,
43 [FSF_QTCB_CLOSE_PORT] = FSF_SUPPORT_COMMAND,
44 [FSF_QTCB_CLOSE_PHYSICAL_PORT] = FSF_SUPPORT_COMMAND,
45 [FSF_QTCB_SEND_ELS] = FSF_SUPPORT_COMMAND,
46 [FSF_QTCB_SEND_GENERIC] = FSF_SUPPORT_COMMAND,
47 [FSF_QTCB_EXCHANGE_CONFIG_DATA] = FSF_CONFIG_COMMAND,
48 [FSF_QTCB_EXCHANGE_PORT_DATA] = FSF_PORT_COMMAND,
49 [FSF_QTCB_DOWNLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND,
50 [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND
51 };
52
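/* Decode an access control table (ACT) status qualifier and warn about the subtable type and rule that caused the access denial. */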
53 static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table)
54 {
55 u16 subtable = table >> 16;
56 u16 rule = table & 0xffff;
57 const char *act_type[] = { "unknown", "OS", "WWPN", "DID", "LUN" };
58
59 if (subtable && subtable < ARRAY_SIZE(act_type))
60 dev_warn(&adapter->ccw_device->dev,
61 "Access denied according to ACT rule type %s, "
62 "rule %d\n", act_type[subtable], rule);
63 }
64
65 static void zfcp_fsf_access_denied_port(struct zfcp_fsf_req *req,
66 struct zfcp_port *port)
67 {
68 struct fsf_qtcb_header *header = &req->qtcb->header;
69 dev_warn(&req->adapter->ccw_device->dev,
70 "Access denied to port 0x%016Lx\n",
71 port->wwpn);
72 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
73 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
74 zfcp_erp_port_access_denied(port, 55, req);
75 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
76 }
77
78 static void zfcp_fsf_access_denied_unit(struct zfcp_fsf_req *req,
79 struct zfcp_unit *unit)
80 {
81 struct fsf_qtcb_header *header = &req->qtcb->header;
82 dev_warn(&req->adapter->ccw_device->dev,
83 "Access denied to unit 0x%016Lx on port 0x%016Lx\n",
84 unit->fcp_lun, unit->port->wwpn);
85 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
86 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
87 zfcp_erp_unit_access_denied(unit, 59, req);
88 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
89 }
90
91 static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
92 {
93 dev_err(&req->adapter->ccw_device->dev, "FCP device not "
94 "operational because of an unsupported FC class\n");
95 zfcp_erp_adapter_shutdown(req->adapter, 0, 123, req);
96 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
97 }
98
99 /**
100 * zfcp_fsf_req_free - free memory used by fsf request
101 * @req: pointer to struct zfcp_fsf_req
102 */
103 void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
104 {
105 if (likely(req->pool)) {
106 mempool_free(req, req->pool);
107 return;
108 }
109
110 if (req->qtcb) {
111 kmem_cache_free(zfcp_data.fsf_req_qtcb_cache, req);
112 return;
113 }
114 }
115
116 /**
117 * zfcp_fsf_req_dismiss_all - dismiss all fsf requests
118 * @adapter: pointer to struct zfcp_adapter
119 *
120 * Never ever call this without shutting down the adapter first.
121 * Otherwise the adapter would continue using and corrupting s390 storage.
122 * A BUG_ON() call is included to ensure this is done.
123 * ERP is supposed to be the only user of this function.
124 */
125 void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
126 {
127 struct zfcp_fsf_req *req, *tmp;
128 unsigned long flags;
129 LIST_HEAD(remove_queue);
130 unsigned int i;
131
132 BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
133 spin_lock_irqsave(&adapter->req_list_lock, flags);
134 for (i = 0; i < REQUEST_LIST_SIZE; i++)
135 list_splice_init(&adapter->req_list[i], &remove_queue);
136 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
137
138 list_for_each_entry_safe(req, tmp, &remove_queue, list) {
139 list_del(&req->list);
140 req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
141 zfcp_fsf_req_complete(req);
142 }
143 }
144
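/* Look up the port named in an unsolicited status buffer by its D_ID and trigger a port reopen or shutdown, depending on the status subtype. */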
145 static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
146 {
147 struct fsf_status_read_buffer *sr_buf = req->data;
148 struct zfcp_adapter *adapter = req->adapter;
149 struct zfcp_port *port;
150 int d_id = sr_buf->d_id & ZFCP_DID_MASK;
151 unsigned long flags;
152
153 read_lock_irqsave(&zfcp_data.config_lock, flags);
154 list_for_each_entry(port, &adapter->port_list_head, list)
155 if (port->d_id == d_id) {
156 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
157 switch (sr_buf->status_subtype) {
158 case FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT:
159 zfcp_erp_port_reopen(port, 0, 101, req);
160 break;
161 case FSF_STATUS_READ_SUB_ERROR_PORT:
162 zfcp_erp_port_shutdown(port, 0, 122, req);
163 break;
164 }
165 return;
166 }
167 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
168 }
169
170 static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, u8 id,
171 struct fsf_link_down_info *link_down)
172 {
173 struct zfcp_adapter *adapter = req->adapter;
174
175 if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
176 return;
177
178 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
179
180 if (!link_down)
181 goto out;
182
183 switch (link_down->error_code) {
184 case FSF_PSQ_LINK_NO_LIGHT:
185 dev_warn(&req->adapter->ccw_device->dev,
186 "There is no light signal from the local "
187 "fibre channel cable\n");
188 break;
189 case FSF_PSQ_LINK_WRAP_PLUG:
190 dev_warn(&req->adapter->ccw_device->dev,
191 "There is a wrap plug instead of a fibre "
192 "channel cable\n");
193 break;
194 case FSF_PSQ_LINK_NO_FCP:
195 dev_warn(&req->adapter->ccw_device->dev,
196 "The adjacent fibre channel node does not "
197 "support FCP\n");
198 break;
199 case FSF_PSQ_LINK_FIRMWARE_UPDATE:
200 dev_warn(&req->adapter->ccw_device->dev,
201 "The FCP device is suspended because of a "
202 "firmware update\n");
203 break;
204 case FSF_PSQ_LINK_INVALID_WWPN:
205 dev_warn(&req->adapter->ccw_device->dev,
206 "The FCP device detected a WWPN that is "
207 "duplicate or not valid\n");
208 break;
209 case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
210 dev_warn(&req->adapter->ccw_device->dev,
211 "The fibre channel fabric does not support NPIV\n");
212 break;
213 case FSF_PSQ_LINK_NO_FCP_RESOURCES:
214 dev_warn(&req->adapter->ccw_device->dev,
215 "The FCP adapter cannot support more NPIV ports\n");
216 break;
217 case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
218 dev_warn(&req->adapter->ccw_device->dev,
219 "The adjacent switch cannot support "
220 "more NPIV ports\n");
221 break;
222 case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
223 dev_warn(&req->adapter->ccw_device->dev,
224 "The FCP adapter could not log in to the "
225 "fibre channel fabric\n");
226 break;
227 case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
228 dev_warn(&req->adapter->ccw_device->dev,
229 "The WWPN assignment file on the FCP adapter "
230 "has been damaged\n");
231 break;
232 case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
233 dev_warn(&req->adapter->ccw_device->dev,
234 "The mode table on the FCP adapter "
235 "has been damaged\n");
236 break;
237 case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
238 dev_warn(&req->adapter->ccw_device->dev,
239 "All NPIV ports on the FCP adapter have "
240 "been assigned\n");
241 break;
242 default:
243 dev_warn(&req->adapter->ccw_device->dev,
244 "The link between the FCP adapter and "
245 "the FC fabric is down\n");
246 }
247 out:
248 zfcp_erp_adapter_failed(adapter, id, req);
249 }
250
251 static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
252 {
253 struct fsf_status_read_buffer *sr_buf = req->data;
254 struct fsf_link_down_info *ldi =
255 (struct fsf_link_down_info *) &sr_buf->payload;
256
257 switch (sr_buf->status_subtype) {
258 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
259 zfcp_fsf_link_down_info_eval(req, 38, ldi);
260 break;
261 case FSF_STATUS_READ_SUB_FDISC_FAILED:
262 zfcp_fsf_link_down_info_eval(req, 39, ldi);
263 break;
264 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
265 zfcp_fsf_link_down_info_eval(req, 40, NULL);
266 }
267 }
268
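/* Handle an unsolicited status (status read) buffer: dispatch on the status type, then free the buffer and request and bump stat_miss so stat_work can issue a replacement status read request. */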
269 static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
270 {
271 struct zfcp_adapter *adapter = req->adapter;
272 struct fsf_status_read_buffer *sr_buf = req->data;
273
274 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
275 zfcp_hba_dbf_event_fsf_unsol("dism", adapter, sr_buf);
276 mempool_free(sr_buf, adapter->pool.data_status_read);
277 zfcp_fsf_req_free(req);
278 return;
279 }
280
281 zfcp_hba_dbf_event_fsf_unsol("read", adapter, sr_buf);
282
283 switch (sr_buf->status_type) {
284 case FSF_STATUS_READ_PORT_CLOSED:
285 zfcp_fsf_status_read_port_closed(req);
286 break;
287 case FSF_STATUS_READ_INCOMING_ELS:
288 zfcp_fc_incoming_els(req);
289 break;
290 case FSF_STATUS_READ_SENSE_DATA_AVAIL:
291 break;
292 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
293 dev_warn(&adapter->ccw_device->dev,
294 "The error threshold for checksum statistics "
295 "has been exceeded\n");
296 break;
297 case FSF_STATUS_READ_LINK_DOWN:
298 zfcp_fsf_status_read_link_down(req);
299 break;
300 case FSF_STATUS_READ_LINK_UP:
301 dev_info(&adapter->ccw_device->dev,
302 "The local link has been restored\n");
303 /* All ports should be marked as ready to run again */
304 zfcp_erp_modify_adapter_status(adapter, 30, NULL,
305 ZFCP_STATUS_COMMON_RUNNING,
306 ZFCP_SET);
307 zfcp_erp_adapter_reopen(adapter,
308 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
309 ZFCP_STATUS_COMMON_ERP_FAILED,
310 102, req);
311 break;
312 case FSF_STATUS_READ_NOTIFICATION_LOST:
313 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED)
314 zfcp_erp_adapter_access_changed(adapter, 135, req);
315 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
316 schedule_work(&adapter->scan_work);
317 break;
318 case FSF_STATUS_READ_CFDC_UPDATED:
319 zfcp_erp_adapter_access_changed(adapter, 136, req);
320 break;
321 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
322 adapter->adapter_features = sr_buf->payload.word[0];
323 break;
324 }
325
326 mempool_free(sr_buf, adapter->pool.data_status_read);
327 zfcp_fsf_req_free(req);
328
329 atomic_inc(&adapter->stat_miss);
330 schedule_work(&adapter->stat_work);
331 }
332
333 static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
334 {
335 switch (req->qtcb->header.fsf_status_qual.word[0]) {
336 case FSF_SQ_FCP_RSP_AVAILABLE:
337 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
338 case FSF_SQ_NO_RETRY_POSSIBLE:
339 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
340 return;
341 case FSF_SQ_COMMAND_ABORTED:
342 req->status |= ZFCP_STATUS_FSFREQ_ABORTED;
343 break;
344 case FSF_SQ_NO_RECOM:
345 dev_err(&req->adapter->ccw_device->dev,
346 "The FCP adapter reported a problem "
347 "that cannot be recovered\n");
348 zfcp_erp_adapter_shutdown(req->adapter, 0, 121, req);
349 break;
350 }
351 /* all non-return cases set FSFREQ_ERROR */
352 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
353 }
354
355 static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
356 {
357 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
358 return;
359
360 switch (req->qtcb->header.fsf_status) {
361 case FSF_UNKNOWN_COMMAND:
362 dev_err(&req->adapter->ccw_device->dev,
363 "The FCP adapter does not recognize the command 0x%x\n",
364 req->qtcb->header.fsf_command);
365 zfcp_erp_adapter_shutdown(req->adapter, 0, 120, req);
366 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
367 break;
368 case FSF_ADAPTER_STATUS_AVAILABLE:
369 zfcp_fsf_fsfstatus_qual_eval(req);
370 break;
371 }
372 }
373
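/* Evaluate the protocol status in the QTCB prefix; anything other than FSF_PROT_GOOD or FSF_PROT_FSF_STATUS_PRESENTED marks the request with FSFREQ_ERROR and may trigger adapter reopen or shutdown. */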
374 static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
375 {
376 struct zfcp_adapter *adapter = req->adapter;
377 struct fsf_qtcb *qtcb = req->qtcb;
378 union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual;
379
380 zfcp_hba_dbf_event_fsf_response(req);
381
382 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
383 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
384 ZFCP_STATUS_FSFREQ_RETRY; /* only for SCSI cmnds. */
385 return;
386 }
387
388 switch (qtcb->prefix.prot_status) {
389 case FSF_PROT_GOOD:
390 case FSF_PROT_FSF_STATUS_PRESENTED:
391 return;
392 case FSF_PROT_QTCB_VERSION_ERROR:
393 dev_err(&adapter->ccw_device->dev,
394 "QTCB version 0x%x not supported by FCP adapter "
395 "(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
396 psq->word[0], psq->word[1]);
397 zfcp_erp_adapter_shutdown(adapter, 0, 117, req);
398 break;
399 case FSF_PROT_ERROR_STATE:
400 case FSF_PROT_SEQ_NUMB_ERROR:
401 zfcp_erp_adapter_reopen(adapter, 0, 98, req);
402 req->status |= ZFCP_STATUS_FSFREQ_RETRY;
403 break;
404 case FSF_PROT_UNSUPP_QTCB_TYPE:
405 dev_err(&adapter->ccw_device->dev,
406 "The QTCB type is not supported by the FCP adapter\n");
407 zfcp_erp_adapter_shutdown(adapter, 0, 118, req);
408 break;
409 case FSF_PROT_HOST_CONNECTION_INITIALIZING:
410 atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
411 &adapter->status);
412 break;
413 case FSF_PROT_DUPLICATE_REQUEST_ID:
414 dev_err(&adapter->ccw_device->dev,
415 "0x%Lx is an ambiguous request identifier\n",
416 (unsigned long long)qtcb->bottom.support.req_handle);
417 zfcp_erp_adapter_shutdown(adapter, 0, 78, req);
418 break;
419 case FSF_PROT_LINK_DOWN:
420 zfcp_fsf_link_down_info_eval(req, 37, &psq->link_down_info);
421 /* FIXME: reopening adapter now? better wait for link up */
422 zfcp_erp_adapter_reopen(adapter, 0, 79, req);
423 break;
424 case FSF_PROT_REEST_QUEUE:
425 /* All ports should be marked as ready to run again */
426 zfcp_erp_modify_adapter_status(adapter, 28, NULL,
427 ZFCP_STATUS_COMMON_RUNNING,
428 ZFCP_SET);
429 zfcp_erp_adapter_reopen(adapter,
430 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
431 ZFCP_STATUS_COMMON_ERP_FAILED, 99, req);
432 break;
433 default:
434 dev_err(&adapter->ccw_device->dev,
435 "0x%x is not a valid transfer protocol status\n",
436 qtcb->prefix.prot_status);
437 zfcp_erp_adapter_shutdown(adapter, 0, 119, req);
438 }
439 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
440 }
441
442 /**
443 * zfcp_fsf_req_complete - process completion of a FSF request
444 * @req: the FSF request that has been completed
445 *
446 * When a request has been completed, either by the FCP adapter or
447 * because it has been dismissed due to a queue shutdown, this function
448 * processes the completion status and triggers further events related
449 * to the FSF request.
450 */
451 void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
452 {
453 if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) {
454 zfcp_fsf_status_read_handler(req);
455 return;
456 }
457
458 del_timer(&req->timer);
459 zfcp_fsf_protstatus_eval(req);
460 zfcp_fsf_fsfstatus_eval(req);
461 req->handler(req);
462
463 if (req->erp_action)
464 zfcp_erp_notify(req->erp_action, 0);
465 req->status |= ZFCP_STATUS_FSFREQ_COMPLETED;
466
467 if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
468 zfcp_fsf_req_free(req);
469 else
470 /* notify initiator waiting for the requests completion */
471 /*
472 * FIXME: Race! We must not access fsf_req here as it might have been
473 * cleaned up already due to the set ZFCP_STATUS_FSFREQ_COMPLETED
474 * flag. It's an improbable case. But, we have the same paranoia for
475 * the cleanup flag already.
476 * Might better be handled using complete()?
477 * (setting the flag and doing wakeup ought to be atomic
478 * with regard to checking the flag as long as waitqueue is
479 * part of the to be released structure)
480 */
481 wake_up(&req->completion_wq);
482 }
483
484 static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
485 {
486 struct fsf_qtcb_bottom_config *bottom;
487 struct zfcp_adapter *adapter = req->adapter;
488 struct Scsi_Host *shost = adapter->scsi_host;
489
490 bottom = &req->qtcb->bottom.config;
491
492 if (req->data)
493 memcpy(req->data, bottom, sizeof(*bottom));
494
495 fc_host_node_name(shost) = bottom->nport_serv_param.wwnn;
496 fc_host_port_name(shost) = bottom->nport_serv_param.wwpn;
497 fc_host_port_id(shost) = bottom->s_id & ZFCP_DID_MASK;
498 fc_host_speed(shost) = bottom->fc_link_speed;
499 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
500
501 adapter->hydra_version = bottom->adapter_type;
502 adapter->timer_ticks = bottom->timer_interval;
503
504 if (fc_host_permanent_port_name(shost) == -1)
505 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
506
507 switch (bottom->fc_topology) {
508 case FSF_TOPO_P2P:
509 adapter->peer_d_id = bottom->peer_d_id & ZFCP_DID_MASK;
510 adapter->peer_wwpn = bottom->plogi_payload.wwpn;
511 adapter->peer_wwnn = bottom->plogi_payload.wwnn;
512 fc_host_port_type(shost) = FC_PORTTYPE_PTP;
513 break;
514 case FSF_TOPO_FABRIC:
515 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
516 break;
517 case FSF_TOPO_AL:
518 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
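/* fall through: arbitrated loop is not supported */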
519 default:
520 dev_err(&adapter->ccw_device->dev,
521 "Unknown or unsupported arbitrated loop "
522 "fibre channel topology detected\n");
523 zfcp_erp_adapter_shutdown(adapter, 0, 127, req);
524 return -EIO;
525 }
526
527 return 0;
528 }
529
530 static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
531 {
532 struct zfcp_adapter *adapter = req->adapter;
533 struct fsf_qtcb *qtcb = req->qtcb;
534 struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
535 struct Scsi_Host *shost = adapter->scsi_host;
536
537 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
538 return;
539
540 adapter->fsf_lic_version = bottom->lic_version;
541 adapter->adapter_features = bottom->adapter_features;
542 adapter->connection_features = bottom->connection_features;
543 adapter->peer_wwpn = 0;
544 adapter->peer_wwnn = 0;
545 adapter->peer_d_id = 0;
546
547 switch (qtcb->header.fsf_status) {
548 case FSF_GOOD:
549 if (zfcp_fsf_exchange_config_evaluate(req))
550 return;
551
552 if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
553 dev_err(&adapter->ccw_device->dev,
554 "FCP adapter maximum QTCB size (%d bytes) "
555 "is too small\n",
556 bottom->max_qtcb_size);
557 zfcp_erp_adapter_shutdown(adapter, 0, 129, req);
558 return;
559 }
560 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
561 &adapter->status);
562 break;
563 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
564 fc_host_node_name(shost) = 0;
565 fc_host_port_name(shost) = 0;
566 fc_host_port_id(shost) = 0;
567 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
568 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
569 adapter->hydra_version = 0;
570
571 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
572 &adapter->status);
573
574 zfcp_fsf_link_down_info_eval(req, 42,
575 &qtcb->header.fsf_status_qual.link_down_info);
576 break;
577 default:
578 zfcp_erp_adapter_shutdown(adapter, 0, 130, req);
579 return;
580 }
581
582 if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
583 adapter->hardware_version = bottom->hardware_version;
584 memcpy(fc_host_serial_number(shost), bottom->serial_number,
585 min(FC_SERIAL_NUMBER_SIZE, 17));
586 EBCASC(fc_host_serial_number(shost),
587 min(FC_SERIAL_NUMBER_SIZE, 17));
588 }
589
590 if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
591 dev_err(&adapter->ccw_device->dev,
592 "The FCP adapter only supports newer "
593 "control block versions\n");
594 zfcp_erp_adapter_shutdown(adapter, 0, 125, req);
595 return;
596 }
597 if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
598 dev_err(&adapter->ccw_device->dev,
599 "The FCP adapter only supports older "
600 "control block versions\n");
601 zfcp_erp_adapter_shutdown(adapter, 0, 126, req);
602 }
603 }
604
605 static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
606 {
607 struct zfcp_adapter *adapter = req->adapter;
608 struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
609 struct Scsi_Host *shost = adapter->scsi_host;
610
611 if (req->data)
612 memcpy(req->data, bottom, sizeof(*bottom));
613
614 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
615 fc_host_permanent_port_name(shost) = bottom->wwpn;
616 else
617 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
618 fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
619 fc_host_supported_speeds(shost) = bottom->supported_speed;
620 }
621
622 static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
623 {
624 struct zfcp_adapter *adapter = req->adapter;
625 struct fsf_qtcb *qtcb = req->qtcb;
626
627 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
628 return;
629
630 switch (qtcb->header.fsf_status) {
631 case FSF_GOOD:
632 zfcp_fsf_exchange_port_evaluate(req);
633 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
634 break;
635 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
636 zfcp_fsf_exchange_port_evaluate(req);
637 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
638 zfcp_fsf_link_down_info_eval(req, 43,
639 &qtcb->header.fsf_status_qual.link_down_info);
640 break;
641 }
642 }
643
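/* Check for a free SBAL in the request queue. Returns 1 with the queue lock held if one is available, otherwise drops the lock and returns 0. */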
644 static int zfcp_fsf_sbal_check(struct zfcp_qdio_queue *queue)
645 {
646 spin_lock_bh(&queue->lock);
647 if (atomic_read(&queue->count))
648 return 1;
649 spin_unlock_bh(&queue->lock);
650 return 0;
651 }
652
653 static int zfcp_fsf_sbal_available(struct zfcp_adapter *adapter)
654 {
655 unsigned int count = atomic_read(&adapter->req_q.count);
656 if (!count)
657 atomic_inc(&adapter->qdio_outb_full);
658 return count > 0;
659 }
660
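/* Wait up to 5 seconds for a free SBAL. The request queue lock must be held on entry; it is held again on return, either via zfcp_fsf_sbal_check on success or re-acquired on failure. */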
661 static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter)
662 {
663 long ret;
664 struct zfcp_qdio_queue *req_q = &adapter->req_q;
665
666 spin_unlock_bh(&req_q->lock);
667 ret = wait_event_interruptible_timeout(adapter->request_wq,
668 zfcp_fsf_sbal_check(req_q), 5 * HZ);
669 if (ret > 0)
670 return 0;
671 if (!ret)
672 atomic_inc(&adapter->qdio_outb_full);
673
674 spin_lock_bh(&req_q->lock);
675 return -EIO;
676 }
677
678 static struct zfcp_fsf_req *zfcp_fsf_alloc_noqtcb(mempool_t *pool)
679 {
680 struct zfcp_fsf_req *req;
681 req = mempool_alloc(pool, GFP_ATOMIC);
682 if (!req)
683 return NULL;
684 memset(req, 0, sizeof(*req));
685 return req;
686 }
687
688 static struct zfcp_fsf_req *zfcp_fsf_alloc_qtcb(mempool_t *pool)
689 {
690 struct zfcp_fsf_req_qtcb *qtcb;
691
692 if (likely(pool))
693 qtcb = mempool_alloc(pool, GFP_ATOMIC);
694 else
695 qtcb = kmem_cache_alloc(zfcp_data.fsf_req_qtcb_cache,
696 GFP_ATOMIC);
697 if (unlikely(!qtcb))
698 return NULL;
699
700 memset(qtcb, 0, sizeof(*qtcb));
701 qtcb->fsf_req.qtcb = &qtcb->qtcb;
702 qtcb->fsf_req.pool = pool;
703
704 return &qtcb->fsf_req;
705 }
706
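/* Allocate an FSF request (with or without QTCB), assign the next request id, and set up the first SBALE and, if present, the QTCB prefix and header. */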
707 static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter,
708 u32 fsf_cmd, int req_flags,
709 mempool_t *pool)
710 {
711 volatile struct qdio_buffer_element *sbale;
712
713 struct zfcp_fsf_req *req;
714 struct zfcp_qdio_queue *req_q = &adapter->req_q;
715
716 if (req_flags & ZFCP_REQ_NO_QTCB)
717 req = zfcp_fsf_alloc_noqtcb(pool);
718 else
719 req = zfcp_fsf_alloc_qtcb(pool);
720
721 if (unlikely(!req))
722 return ERR_PTR(-EIO);
723
724 if (adapter->req_no == 0)
725 adapter->req_no++;
726
727 INIT_LIST_HEAD(&req->list);
728 init_timer(&req->timer);
729 init_waitqueue_head(&req->completion_wq);
730
731 req->adapter = adapter;
732 req->fsf_command = fsf_cmd;
733 req->req_id = adapter->req_no++;
734 req->sbal_number = 1;
735 req->sbal_first = req_q->first;
736 req->sbal_last = req_q->first;
737 req->sbale_curr = 1;
738
739 sbale = zfcp_qdio_sbale_req(req);
740 sbale[0].addr = (void *) req->req_id;
741 sbale[0].flags |= SBAL_FLAGS0_COMMAND;
742
743 if (likely(req->qtcb)) {
744 req->qtcb->prefix.req_seq_no = req->adapter->fsf_req_seq_no;
745 req->qtcb->prefix.req_id = req->req_id;
746 req->qtcb->prefix.ulp_info = 26;
747 req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command];
748 req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
749 req->qtcb->header.req_handle = req->req_id;
750 req->qtcb->header.fsf_command = req->fsf_command;
751 req->seq_no = adapter->fsf_req_seq_no;
752 req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
753 sbale[1].addr = (void *) req->qtcb;
754 sbale[1].length = sizeof(struct fsf_qtcb);
755 }
756
757 if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) {
758 zfcp_fsf_req_free(req);
759 return ERR_PTR(-EIO);
760 }
761
762 if (likely(req_flags & ZFCP_REQ_AUTO_CLEANUP))
763 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
764
765 return req;
766 }
767
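/* Add the request to the adapter's request hash table and hand it to QDIO; on failure, remove it again, roll back the queue accounting and reopen the adapter. */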
768 static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
769 {
770 struct zfcp_adapter *adapter = req->adapter;
771 struct zfcp_qdio_queue *req_q = &adapter->req_q;
772 int idx;
773
774 /* put allocated FSF request into hash table */
775 spin_lock(&adapter->req_list_lock);
776 idx = zfcp_reqlist_hash(req->req_id);
777 list_add_tail(&req->list, &adapter->req_list[idx]);
778 spin_unlock(&adapter->req_list_lock);
779
780 req->issued = get_clock();
781 if (zfcp_qdio_send(req)) {
782 /* Queues are down..... */
783 del_timer(&req->timer);
784 spin_lock(&adapter->req_list_lock);
785 zfcp_reqlist_remove(adapter, req);
786 spin_unlock(&adapter->req_list_lock);
787 /* undo changes in request queue made for this request */
788 atomic_add(req->sbal_number, &req_q->count);
789 req_q->first -= req->sbal_number;
790 req_q->first += QDIO_MAX_BUFFERS_PER_Q;
791 req_q->first %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */
792 zfcp_erp_adapter_reopen(adapter, 0, 116, req);
793 return -EIO;
794 }
795
796 /* Don't increase for unsolicited status */
797 if (req->qtcb)
798 adapter->fsf_req_seq_no++;
799
800 return 0;
801 }
802
803 /**
804 * zfcp_fsf_status_read - send status read request
805 * @adapter: pointer to struct zfcp_adapter
807 * Returns: 0 on success, error otherwise
808 */
809 int zfcp_fsf_status_read(struct zfcp_adapter *adapter)
810 {
811 struct zfcp_fsf_req *req;
812 struct fsf_status_read_buffer *sr_buf;
813 volatile struct qdio_buffer_element *sbale;
814 int retval = -EIO;
815
816 spin_lock_bh(&adapter->req_q.lock);
817 if (zfcp_fsf_req_sbal_get(adapter))
818 goto out;
819
820 req = zfcp_fsf_req_create(adapter, FSF_QTCB_UNSOLICITED_STATUS,
821 ZFCP_REQ_NO_QTCB,
822 adapter->pool.fsf_req_status_read);
823 if (IS_ERR(req)) {
824 retval = PTR_ERR(req);
825 goto out;
826 }
827
828 sbale = zfcp_qdio_sbale_req(req);
829 sbale[0].flags |= SBAL_FLAGS0_TYPE_STATUS;
830 sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY;
831 req->sbale_curr = 2;
832
833 sr_buf = mempool_alloc(adapter->pool.data_status_read, GFP_ATOMIC);
834 if (!sr_buf) {
835 retval = -ENOMEM;
836 goto failed_buf;
837 }
838 memset(sr_buf, 0, sizeof(*sr_buf));
839 req->data = sr_buf;
840 sbale = zfcp_qdio_sbale_curr(req);
841 sbale->addr = (void *) sr_buf;
842 sbale->length = sizeof(*sr_buf);
843
844 retval = zfcp_fsf_req_send(req);
845 if (retval)
846 goto failed_req_send;
847
848 goto out;
849
850 failed_req_send:
851 mempool_free(sr_buf, adapter->pool.data_status_read);
852 failed_buf:
853 zfcp_fsf_req_free(req);
854 zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL);
855 out:
856 spin_unlock_bh(&adapter->req_q.lock);
857 return retval;
858 }
859
860 static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
861 {
862 struct zfcp_unit *unit = req->data;
863 union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
864
865 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
866 return;
867
868 switch (req->qtcb->header.fsf_status) {
869 case FSF_PORT_HANDLE_NOT_VALID:
870 if (fsq->word[0] == fsq->word[1]) {
871 zfcp_erp_adapter_reopen(unit->port->adapter, 0, 104,
872 req);
873 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
874 }
875 break;
876 case FSF_LUN_HANDLE_NOT_VALID:
877 if (fsq->word[0] == fsq->word[1]) {
878 zfcp_erp_port_reopen(unit->port, 0, 105, req);
879 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
880 }
881 break;
882 case FSF_FCP_COMMAND_DOES_NOT_EXIST:
883 req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
884 break;
885 case FSF_PORT_BOXED:
886 zfcp_erp_port_boxed(unit->port, 47, req);
887 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
888 ZFCP_STATUS_FSFREQ_RETRY;
889 break;
890 case FSF_LUN_BOXED:
891 zfcp_erp_unit_boxed(unit, 48, req);
892 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
893 ZFCP_STATUS_FSFREQ_RETRY;
894 break;
895 case FSF_ADAPTER_STATUS_AVAILABLE:
896 switch (fsq->word[0]) {
897 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
898 zfcp_test_link(unit->port);
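/* fall through */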
899 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
900 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
901 break;
902 }
903 break;
904 case FSF_GOOD:
905 req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
906 break;
907 }
908 }
909
910 /**
911 * zfcp_fsf_abort_fcp_command - abort running SCSI command
912 * @old_req_id: request id of the FCP command to be aborted
913 * @adapter: pointer to struct zfcp_adapter
914 * @unit: pointer to struct zfcp_unit
915 * @req_flags: integer specifying the request flags
916 * Returns: pointer to struct zfcp_fsf_req
917 *
918 * FIXME(design): should be watched by a timeout !!!
919 */
920
921 struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
922 struct zfcp_adapter *adapter,
923 struct zfcp_unit *unit,
924 int req_flags)
925 {
926 volatile struct qdio_buffer_element *sbale;
927 struct zfcp_fsf_req *req = NULL;
928
929 spin_lock(&adapter->req_q.lock);
930 if (!zfcp_fsf_sbal_available(adapter))
931 goto out;
932 req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND,
933 req_flags, adapter->pool.fsf_req_abort);
934 if (IS_ERR(req))
935 goto out;
936
937 if (unlikely(!(atomic_read(&unit->status) &
938 ZFCP_STATUS_COMMON_UNBLOCKED)))
939 goto out_error_free;
940
941 sbale = zfcp_qdio_sbale_req(req);
942 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
943 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
944
945 req->data = unit;
946 req->handler = zfcp_fsf_abort_fcp_command_handler;
947 req->qtcb->header.lun_handle = unit->handle;
948 req->qtcb->header.port_handle = unit->port->handle;
949 req->qtcb->bottom.support.req_handle = (u64) old_req_id;
950
951 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
952 if (!zfcp_fsf_req_send(req))
953 goto out;
954
955 out_error_free:
956 zfcp_fsf_req_free(req);
957 req = NULL;
958 out:
959 spin_unlock(&adapter->req_q.lock);
960 return req;
961 }
962
963 static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
964 {
965 struct zfcp_adapter *adapter = req->adapter;
966 struct zfcp_send_ct *send_ct = req->data;
967 struct zfcp_port *port = send_ct->port;
968 struct fsf_qtcb_header *header = &req->qtcb->header;
969
970 send_ct->status = -EINVAL;
971
972 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
973 goto skip_fsfstatus;
974
975 switch (header->fsf_status) {
976 case FSF_GOOD:
977 zfcp_san_dbf_event_ct_response(req);
978 send_ct->status = 0;
979 break;
980 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
981 zfcp_fsf_class_not_supp(req);
982 break;
983 case FSF_ADAPTER_STATUS_AVAILABLE:
984 switch (header->fsf_status_qual.word[0]){
985 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
986 zfcp_test_link(port);
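/* fall through */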
987 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
988 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
989 break;
990 }
991 break;
992 case FSF_ACCESS_DENIED:
993 zfcp_fsf_access_denied_port(req, port);
994 break;
995 case FSF_PORT_BOXED:
996 zfcp_erp_port_boxed(port, 49, req);
997 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
998 ZFCP_STATUS_FSFREQ_RETRY;
999 break;
1000 case FSF_PORT_HANDLE_NOT_VALID:
1001 zfcp_erp_adapter_reopen(adapter, 0, 106, req);
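/* fall through */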
1002 case FSF_GENERIC_COMMAND_REJECTED:
1003 case FSF_PAYLOAD_SIZE_MISMATCH:
1004 case FSF_REQUEST_SIZE_TOO_LARGE:
1005 case FSF_RESPONSE_SIZE_TOO_LARGE:
1006 case FSF_SBAL_MISMATCH:
1007 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1008 break;
1009 }
1010
1011 skip_fsfstatus:
1012 if (send_ct->handler)
1013 send_ct->handler(send_ct->handler_data);
1014 }
1015
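/* Map the request and response scatterlists into SBALs and record the resulting buffer lengths in the QTCB support bottom. */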
1016 static int zfcp_fsf_setup_sbals(struct zfcp_fsf_req *req,
1017 struct scatterlist *sg_req,
1018 struct scatterlist *sg_resp, int max_sbals)
1019 {
1020 int bytes;
1021
1022 bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ,
1023 sg_req, max_sbals);
1024 if (bytes <= 0)
1025 return -ENOMEM;
1026 req->qtcb->bottom.support.req_buf_length = bytes;
1027 req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
1028
1029 bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ,
1030 sg_resp, max_sbals);
1031 if (bytes <= 0)
1032 return -ENOMEM;
1033 req->qtcb->bottom.support.resp_buf_length = bytes;
1034
1035 return 0;
1036 }
1037
1038 /**
1039 * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
1040 * @ct: pointer to struct zfcp_send_ct with data for request
1041 * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
1042 * @erp_action: if non-null, the Generic Service request is sent within ERP
1043 */
1044 int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
1045 struct zfcp_erp_action *erp_action)
1046 {
1047 struct zfcp_port *port = ct->port;
1048 struct zfcp_adapter *adapter = port->adapter;
1049 struct zfcp_fsf_req *req;
1050 int ret = -EIO;
1051
1052 spin_lock_bh(&adapter->req_q.lock);
1053 if (zfcp_fsf_req_sbal_get(adapter))
1054 goto out;
1055
1056 req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_GENERIC,
1057 ZFCP_REQ_AUTO_CLEANUP, pool);
1058 if (IS_ERR(req)) {
1059 ret = PTR_ERR(req);
1060 goto out;
1061 }
1062
1063 ret = zfcp_fsf_setup_sbals(req, ct->req, ct->resp,
1064 FSF_MAX_SBALS_PER_REQ);
1065 if (ret)
1066 goto failed_send;
1067
1068 req->handler = zfcp_fsf_send_ct_handler;
1069 req->qtcb->header.port_handle = port->handle;
1070 req->qtcb->bottom.support.service_class = FSF_CLASS_3;
1071 req->qtcb->bottom.support.timeout = ct->timeout;
1072 req->data = ct;
1073
1074 zfcp_san_dbf_event_ct_request(req);
1075
1076 if (erp_action) {
1077 erp_action->fsf_req = req;
1078 req->erp_action = erp_action;
1079 zfcp_fsf_start_erp_timer(req);
1080 } else
1081 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1082
1083 ret = zfcp_fsf_req_send(req);
1084 if (ret)
1085 goto failed_send;
1086
1087 goto out;
1088
1089 failed_send:
1090 zfcp_fsf_req_free(req);
1091 if (erp_action)
1092 erp_action->fsf_req = NULL;
1093 out:
1094 spin_unlock_bh(&adapter->req_q.lock);
1095 return ret;
1096 }
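/*
 * Illustrative sketch only (not part of the driver): a caller is expected
 * to set up a struct zfcp_send_ct roughly as follows before calling
 * zfcp_fsf_send_ct(). Field usage is inferred from the handler above;
 * the callback name and context are made up for the example.
 *
 *	struct zfcp_send_ct ct = {
 *		.port = port,			// target port for the CT request
 *		.req = req_sg,			// scatterlist holding the CT IU
 *		.resp = resp_sg,		// scatterlist for the response
 *		.timeout = timeout,		// passed through to the FSF channel
 *		.handler = my_ct_done,		// completion callback (hypothetical)
 *		.handler_data = my_ctx,
 *	};
 *	if (zfcp_fsf_send_ct(&ct, NULL, NULL))
 *		// handle submission failure; otherwise ct.status is set
 *		// and the handler runs on completion
 */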
1097
1098 static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
1099 {
1100 struct zfcp_send_els *send_els = req->data;
1101 struct zfcp_port *port = send_els->port;
1102 struct fsf_qtcb_header *header = &req->qtcb->header;
1103
1104 send_els->status = -EINVAL;
1105
1106 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1107 goto skip_fsfstatus;
1108
1109 switch (header->fsf_status) {
1110 case FSF_GOOD:
1111 zfcp_san_dbf_event_els_response(req);
1112 send_els->status = 0;
1113 break;
1114 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
1115 zfcp_fsf_class_not_supp(req);
1116 break;
1117 case FSF_ADAPTER_STATUS_AVAILABLE:
1118 switch (header->fsf_status_qual.word[0]){
1119 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1120 if (port && (send_els->ls_code != ZFCP_LS_ADISC))
1121 zfcp_test_link(port);
1122 /* fall through */
1123 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1124 case FSF_SQ_RETRY_IF_POSSIBLE:
1125 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1126 break;
1127 }
1128 break;
1129 case FSF_ELS_COMMAND_REJECTED:
1130 case FSF_PAYLOAD_SIZE_MISMATCH:
1131 case FSF_REQUEST_SIZE_TOO_LARGE:
1132 case FSF_RESPONSE_SIZE_TOO_LARGE:
1133 break;
1134 case FSF_ACCESS_DENIED:
1135 zfcp_fsf_access_denied_port(req, port);
1136 break;
1137 case FSF_SBAL_MISMATCH:
1138 /* should never occur, avoided in zfcp_fsf_send_els */
1139 /* fall through */
1140 default:
1141 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1142 break;
1143 }
1144 skip_fsfstatus:
1145 if (send_els->handler)
1146 send_els->handler(send_els->handler_data);
1147 }
1148
1149 /**
1150 * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
1151 * @els: pointer to struct zfcp_send_els with data for the command
1152 */
1153 int zfcp_fsf_send_els(struct zfcp_send_els *els)
1154 {
1155 struct zfcp_fsf_req *req;
1156 struct zfcp_adapter *adapter = els->adapter;
1157 struct fsf_qtcb_bottom_support *bottom;
1158 int ret = -EIO;
1159
1160 if (unlikely(!(atomic_read(&els->port->status) &
1161 ZFCP_STATUS_COMMON_UNBLOCKED)))
1162 return -EBUSY;
1163
1164 spin_lock(&adapter->req_q.lock);
1165 if (!zfcp_fsf_sbal_available(adapter))
1166 goto out;
1167 req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS,
1168 ZFCP_REQ_AUTO_CLEANUP, NULL);
1169 if (IS_ERR(req)) {
1170 ret = PTR_ERR(req);
1171 goto out;
1172 }
1173
1174 ret = zfcp_fsf_setup_sbals(req, els->req, els->resp,
1175 FSF_MAX_SBALS_PER_ELS_REQ);
1176 if (ret)
1177 goto failed_send;
1178
1179 bottom = &req->qtcb->bottom.support;
1180 req->handler = zfcp_fsf_send_els_handler;
1181 bottom->d_id = els->d_id;
1182 bottom->service_class = FSF_CLASS_3;
1183 bottom->timeout = 2 * R_A_TOV;
1184 req->data = els;
1185
1186 zfcp_san_dbf_event_els_request(req);
1187
1188 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1189 ret = zfcp_fsf_req_send(req);
1190 if (ret)
1191 goto failed_send;
1192
1193 goto out;
1194
1195 failed_send:
1196 zfcp_fsf_req_free(req);
1197 out:
1198 spin_unlock(&adapter->req_q.lock);
1199 return ret;
1200 }
1201
1202 int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1203 {
1204 volatile struct qdio_buffer_element *sbale;
1205 struct zfcp_fsf_req *req;
1206 struct zfcp_adapter *adapter = erp_action->adapter;
1207 int retval = -EIO;
1208
1209 spin_lock_bh(&adapter->req_q.lock);
1210 if (!zfcp_fsf_sbal_available(adapter))
1211 goto out;
1212 req = zfcp_fsf_req_create(adapter,
1213 FSF_QTCB_EXCHANGE_CONFIG_DATA,
1214 ZFCP_REQ_AUTO_CLEANUP,
1215 adapter->pool.fsf_req_erp);
1216 if (IS_ERR(req)) {
1217 retval = PTR_ERR(req);
1218 goto out;
1219 }
1220
1221 sbale = zfcp_qdio_sbale_req(req);
1222 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1223 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1224
1225 req->qtcb->bottom.config.feature_selection =
1226 FSF_FEATURE_CFDC |
1227 FSF_FEATURE_LUN_SHARING |
1228 FSF_FEATURE_NOTIFICATION_LOST |
1229 FSF_FEATURE_UPDATE_ALERT;
1230 req->erp_action = erp_action;
1231 req->handler = zfcp_fsf_exchange_config_data_handler;
1232 erp_action->fsf_req = req;
1233
1234 zfcp_fsf_start_erp_timer(req);
1235 retval = zfcp_fsf_req_send(req);
1236 if (retval) {
1237 zfcp_fsf_req_free(req);
1238 erp_action->fsf_req = NULL;
1239 }
1240 out:
1241 spin_unlock_bh(&adapter->req_q.lock);
1242 return retval;
1243 }
1244
1245 int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter,
1246 struct fsf_qtcb_bottom_config *data)
1247 {
1248 volatile struct qdio_buffer_element *sbale;
1249 struct zfcp_fsf_req *req = NULL;
1250 int retval = -EIO;
1251
1252 spin_lock_bh(&adapter->req_q.lock);
1253 if (zfcp_fsf_req_sbal_get(adapter))
1254 goto out;
1255
1256 req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1257 0, NULL);
1258 if (IS_ERR(req)) {
1259 retval = PTR_ERR(req);
1260 goto out;
1261 }
1262
1263 sbale = zfcp_qdio_sbale_req(req);
1264 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1265 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1266 req->handler = zfcp_fsf_exchange_config_data_handler;
1267
1268 req->qtcb->bottom.config.feature_selection =
1269 FSF_FEATURE_CFDC |
1270 FSF_FEATURE_LUN_SHARING |
1271 FSF_FEATURE_NOTIFICATION_LOST |
1272 FSF_FEATURE_UPDATE_ALERT;
1273
1274 if (data)
1275 req->data = data;
1276
1277 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1278 retval = zfcp_fsf_req_send(req);
1279 out:
1280 spin_unlock_bh(&adapter->req_q.lock);
1281 if (!retval)
1282 wait_event(req->completion_wq,
1283 req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
1284
1285 if (req) /* req may be NULL if no SBAL was available */
zfcp_fsf_req_free(req);
1286
1287 return retval;
1288 }
1289
1290 /**
1291 * zfcp_fsf_exchange_port_data - request information about local port
1292 * @erp_action: ERP action for the adapter for which port data is requested
1293 * Returns: 0 on success, error otherwise
1294 */
1295 int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1296 {
1297 volatile struct qdio_buffer_element *sbale;
1298 struct zfcp_fsf_req *req;
1299 struct zfcp_adapter *adapter = erp_action->adapter;
1300 int retval = -EIO;
1301
1302 if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1303 return -EOPNOTSUPP;
1304
1305 spin_lock_bh(&adapter->req_q.lock);
1306 if (!zfcp_fsf_sbal_available(adapter))
1307 goto out;
1308 req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA,
1309 ZFCP_REQ_AUTO_CLEANUP,
1310 adapter->pool.fsf_req_erp);
1311 if (IS_ERR(req)) {
1312 retval = PTR_ERR(req);
1313 goto out;
1314 }
1315
1316 sbale = zfcp_qdio_sbale_req(req);
1317 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1318 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1319
1320 req->handler = zfcp_fsf_exchange_port_data_handler;
1321 req->erp_action = erp_action;
1322 erp_action->fsf_req = req;
1323
1324 zfcp_fsf_start_erp_timer(req);
1325 retval = zfcp_fsf_req_send(req);
1326 if (retval) {
1327 zfcp_fsf_req_free(req);
1328 erp_action->fsf_req = NULL;
1329 }
1330 out:
1331 spin_unlock_bh(&adapter->req_q.lock);
1332 return retval;
1333 }
1334
1335 /**
1336 * zfcp_fsf_exchange_port_data_sync - request information about local port
1337 * @adapter: pointer to struct zfcp_adapter
1338 * @data: pointer to struct fsf_qtcb_bottom_port
1339 * Returns: 0 on success, error otherwise
1340 */
1341 int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter,
1342 struct fsf_qtcb_bottom_port *data)
1343 {
1344 volatile struct qdio_buffer_element *sbale;
1345 struct zfcp_fsf_req *req = NULL;
1346 int retval = -EIO;
1347
1348 if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1349 return -EOPNOTSUPP;
1350
1351 spin_lock_bh(&adapter->req_q.lock);
1352 if (!zfcp_fsf_sbal_available(adapter))
1353 goto out;
1354
1355 req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, 0,
1356 NULL);
1357 if (IS_ERR(req)) {
1358 retval = PTR_ERR(req);
1359 goto out;
1360 }
1361
1362 if (data)
1363 req->data = data;
1364
1365 sbale = zfcp_qdio_sbale_req(req);
1366 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1367 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1368
1369 req->handler = zfcp_fsf_exchange_port_data_handler;
1370 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1371 retval = zfcp_fsf_req_send(req);
1372 out:
1373 spin_unlock_bh(&adapter->req_q.lock);
1374 if (!retval)
1375 wait_event(req->completion_wq,
1376 req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
1377 if (req) /* req may be NULL if no SBAL was available */
zfcp_fsf_req_free(req);
1378
1379 return retval;
1380 }
1381
1382 static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1383 {
1384 struct zfcp_port *port = req->data;
1385 struct fsf_qtcb_header *header = &req->qtcb->header;
1386 struct fsf_plogi *plogi;
1387
1388 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1389 goto skip_fsfstatus;
1390
1391 switch (header->fsf_status) {
1392 case FSF_PORT_ALREADY_OPEN:
1393 break;
1394 case FSF_ACCESS_DENIED:
1395 zfcp_fsf_access_denied_port(req, port);
1396 break;
1397 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1398 dev_warn(&req->adapter->ccw_device->dev,
1399 "Not enough FCP adapter resources to open "
1400 "remote port 0x%016Lx\n", port->wwpn);
1401 zfcp_erp_port_failed(port, 31, req);
1402 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1403 break;
1404 case FSF_ADAPTER_STATUS_AVAILABLE:
1405 switch (header->fsf_status_qual.word[0]) {
1406 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1407 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1408 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1409 break;
1410 case FSF_SQ_NO_RETRY_POSSIBLE:
1411 dev_warn(&req->adapter->ccw_device->dev,
1412 "Remote port 0x%016Lx could not be opened\n",
1413 port->wwpn);
1414 zfcp_erp_port_failed(port, 32, req);
1415 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1416 break;
1417 }
1418 break;
1419 case FSF_GOOD:
1420 port->handle = header->port_handle;
1421 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN |
1422 ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1423 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1424 ZFCP_STATUS_COMMON_ACCESS_BOXED,
1425 &port->status);
1426 /* check whether D_ID has changed during open */
1427 /*
1428 * FIXME: This check is not airtight, as the FCP channel does
1429 * not monitor closures of target port connections caused on
1430 * the remote side. Thus, they might miss out on invalidating
1431 * locally cached WWPNs (and other N_Port parameters) of gone
1432 * target ports. So, our heroic attempt to make things safe
1433 * could be undermined by 'open port' response data tagged with
1434 * obsolete WWPNs. Another reason to monitor potential
1435 * connection closures ourself at least (by interpreting
1436 * incoming ELS' and unsolicited status). It just crosses my
1437 * mind that one should be able to cross-check by means of
1438 * another GID_PN straight after a port has been opened.
1439 * Alternately, an ADISC/PDISC ELS should suffice, as well.
1440 */
1441 if (atomic_read(&port->status) & ZFCP_STATUS_PORT_NO_WWPN)
1442 break;
1443
1444 plogi = (struct fsf_plogi *) req->qtcb->bottom.support.els;
1445 if (req->qtcb->bottom.support.els1_length >= sizeof(*plogi)) {
1446 if (plogi->serv_param.wwpn != port->wwpn)
1447 atomic_clear_mask(ZFCP_STATUS_PORT_DID_DID,
1448 &port->status);
1449 else {
1450 port->wwnn = plogi->serv_param.wwnn;
1451 zfcp_fc_plogi_evaluate(port, plogi);
1452 }
1453 }
1454 break;
1455 case FSF_UNKNOWN_OP_SUBTYPE:
1456 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1457 break;
1458 }
1459
1460 skip_fsfstatus:
1461 atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &port->status);
1462 }
1463
1464 /**
1465 * zfcp_fsf_open_port - create and send open port request
1466 * @erp_action: pointer to struct zfcp_erp_action
1467 * Returns: 0 on success, error otherwise
1468 */
1469 int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1470 {
1471 volatile struct qdio_buffer_element *sbale;
1472 struct zfcp_adapter *adapter = erp_action->adapter;
1473 struct zfcp_fsf_req *req;
1474 int retval = -EIO;
1475
1476 spin_lock_bh(&adapter->req_q.lock);
1477 if (zfcp_fsf_req_sbal_get(adapter))
1478 goto out;
1479
1480 req = zfcp_fsf_req_create(adapter,
1481 FSF_QTCB_OPEN_PORT_WITH_DID,
1482 ZFCP_REQ_AUTO_CLEANUP,
1483 adapter->pool.fsf_req_erp);
1484 if (IS_ERR(req)) {
1485 retval = PTR_ERR(req);
1486 goto out;
1487 }
1488
1489 sbale = zfcp_qdio_sbale_req(req);
1490 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1491 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1492
1493 req->handler = zfcp_fsf_open_port_handler;
1494 req->qtcb->bottom.support.d_id = erp_action->port->d_id;
1495 req->data = erp_action->port;
1496 req->erp_action = erp_action;
1497 erp_action->fsf_req = req;
1498 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->port->status);
1499
1500 zfcp_fsf_start_erp_timer(req);
1501 retval = zfcp_fsf_req_send(req);
1502 if (retval) {
1503 zfcp_fsf_req_free(req);
1504 erp_action->fsf_req = NULL;
1505 }
1506 out:
1507 spin_unlock_bh(&adapter->req_q.lock);
1508 return retval;
1509 }
1510
1511 static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
1512 {
1513 struct zfcp_port *port = req->data;
1514
1515 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1516 goto skip_fsfstatus;
1517
1518 switch (req->qtcb->header.fsf_status) {
1519 case FSF_PORT_HANDLE_NOT_VALID:
1520 zfcp_erp_adapter_reopen(port->adapter, 0, 107, req);
1521 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1522 break;
1523 case FSF_ADAPTER_STATUS_AVAILABLE:
1524 break;
1525 case FSF_GOOD:
1526 zfcp_erp_modify_port_status(port, 33, req,
1527 ZFCP_STATUS_COMMON_OPEN,
1528 ZFCP_CLEAR);
1529 break;
1530 }
1531
1532 skip_fsfstatus:
1533 atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &port->status);
1534 }
1535
1536 /**
1537 * zfcp_fsf_close_port - create and send close port request
1538 * @erp_action: pointer to struct zfcp_erp_action
1539 * Returns: 0 on success, error otherwise
1540 */
1541 int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1542 {
1543 volatile struct qdio_buffer_element *sbale;
1544 struct zfcp_adapter *adapter = erp_action->adapter;
1545 struct zfcp_fsf_req *req;
1546 int retval = -EIO;
1547
1548 spin_lock_bh(&adapter->req_q.lock);
1549 if (zfcp_fsf_req_sbal_get(adapter))
1550 goto out;
1551
1552 req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT,
1553 ZFCP_REQ_AUTO_CLEANUP,
1554 adapter->pool.fsf_req_erp);
1555 if (IS_ERR(req)) {
1556 retval = PTR_ERR(req);
1557 goto out;
1558 }
1559
1560 sbale = zfcp_qdio_sbale_req(req);
1561 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1562 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1563
1564 req->handler = zfcp_fsf_close_port_handler;
1565 req->data = erp_action->port;
1566 req->erp_action = erp_action;
1567 req->qtcb->header.port_handle = erp_action->port->handle;
1568 erp_action->fsf_req = req;
1569 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->port->status);
1570
1571 zfcp_fsf_start_erp_timer(req);
1572 retval = zfcp_fsf_req_send(req);
1573 if (retval) {
1574 zfcp_fsf_req_free(req);
1575 erp_action->fsf_req = NULL;
1576 }
1577 out:
1578 spin_unlock_bh(&adapter->req_q.lock);
1579 return retval;
1580 }
1581
1582 static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
1583 {
1584 struct zfcp_port *port = req->data;
1585 struct fsf_qtcb_header *header = &req->qtcb->header;
1586 struct zfcp_unit *unit;
1587
1588 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1589 goto skip_fsfstatus;
1590
1591 switch (header->fsf_status) {
1592 case FSF_PORT_HANDLE_NOT_VALID:
1593 zfcp_erp_adapter_reopen(port->adapter, 0, 108, req);
1594 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1595 break;
1596 case FSF_ACCESS_DENIED:
1597 zfcp_fsf_access_denied_port(req, port);
1598 break;
1599 case FSF_PORT_BOXED:
1600 zfcp_erp_port_boxed(port, 50, req);
1601 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
1602 ZFCP_STATUS_FSFREQ_RETRY;
1603 /* can't use generic zfcp_erp_modify_port_status because
1604 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
1605 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1606 list_for_each_entry(unit, &port->unit_list_head, list)
1607 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
1608 &unit->status);
1609 break;
1610 case FSF_ADAPTER_STATUS_AVAILABLE:
1611 switch (header->fsf_status_qual.word[0]) {
1612 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1613 /* fall through */
1614 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1615 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1616 break;
1617 }
1618 break;
1619 case FSF_GOOD:
1620 /* can't use generic zfcp_erp_modify_port_status because
1621 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
1622 */
1623 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1624 list_for_each_entry(unit, &port->unit_list_head, list)
1625 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
1626 &unit->status);
1627 break;
1628 }
1629 skip_fsfstatus:
1630 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_CLOSING, &port->status);
1631 }
1632
1633 /**
1634 * zfcp_fsf_close_physical_port - close physical port
1635 * @erp_action: pointer to struct zfcp_erp_action
1636 * Returns: 0 on success
1637 */
1638 int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1639 {
1640 volatile struct qdio_buffer_element *sbale;
1641 struct zfcp_adapter *adapter = erp_action->adapter;
1642 struct zfcp_fsf_req *req;
1643 int retval = -EIO;
1644
1645 spin_lock_bh(&adapter->req_q.lock);
1646 if (zfcp_fsf_req_sbal_get(adapter))
1647 goto out;
1648
1649 req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PHYSICAL_PORT,
1650 ZFCP_REQ_AUTO_CLEANUP,
1651 adapter->pool.fsf_req_erp);
1652 if (IS_ERR(req)) {
1653 retval = PTR_ERR(req);
1654 goto out;
1655 }
1656
1657 sbale = zfcp_qdio_sbale_req(req);
1658 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1659 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1660
1661 req->data = erp_action->port;
1662 req->qtcb->header.port_handle = erp_action->port->handle;
1663 req->erp_action = erp_action;
1664 req->handler = zfcp_fsf_close_physical_port_handler;
1665 erp_action->fsf_req = req;
1666 atomic_set_mask(ZFCP_STATUS_PORT_PHYS_CLOSING,
1667 &erp_action->port->status);
1668
1669 zfcp_fsf_start_erp_timer(req);
1670 retval = zfcp_fsf_req_send(req);
1671 if (retval) {
1672 zfcp_fsf_req_free(req);
1673 erp_action->fsf_req = NULL;
1674 }
1675 out:
1676 spin_unlock_bh(&adapter->req_q.lock);
1677 return retval;
1678 }
1679
1680 static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1681 {
1682 struct zfcp_adapter *adapter = req->adapter;
1683 struct zfcp_unit *unit = req->data;
1684 struct fsf_qtcb_header *header = &req->qtcb->header;
1685 struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
1686 struct fsf_queue_designator *queue_designator =
1687 &header->fsf_status_qual.fsf_queue_designator;
1688 int exclusive, readwrite;
1689
1690 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1691 goto skip_fsfstatus;
1692
1693 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1694 ZFCP_STATUS_COMMON_ACCESS_BOXED |
1695 ZFCP_STATUS_UNIT_SHARED |
1696 ZFCP_STATUS_UNIT_READONLY,
1697 &unit->status);
1698
1699 switch (header->fsf_status) {
1700
1701 case FSF_PORT_HANDLE_NOT_VALID:
1702 zfcp_erp_adapter_reopen(unit->port->adapter, 0, 109, req);
1703 /* fall through */
1704 case FSF_LUN_ALREADY_OPEN:
1705 break;
1706 case FSF_ACCESS_DENIED:
1707 zfcp_fsf_access_denied_unit(req, unit);
1708 atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status);
1709 atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
1710 break;
1711 case FSF_PORT_BOXED:
1712 zfcp_erp_port_boxed(unit->port, 51, req);
1713 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
1714 ZFCP_STATUS_FSFREQ_RETRY;
1715 break;
1716 case FSF_LUN_SHARING_VIOLATION:
1717 if (header->fsf_status_qual.word[0])
1718 dev_warn(&adapter->ccw_device->dev,
1719 "LUN 0x%Lx on port 0x%Lx is already in "
1720 "use by CSS%d, MIF Image ID %x\n",
1721 unit->fcp_lun,
1722 unit->port->wwpn,
1723 queue_designator->cssid,
1724 queue_designator->hla);
1725 else
1726 zfcp_act_eval_err(adapter,
1727 header->fsf_status_qual.word[2]);
1728 zfcp_erp_unit_access_denied(unit, 60, req);
1729 atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status);
1730 atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
1731 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1732 break;
1733 case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
1734 dev_warn(&adapter->ccw_device->dev,
1735 "No handle is available for LUN "
1736 "0x%016Lx on port 0x%016Lx\n",
1737 unit->fcp_lun, unit->port->wwpn);
1738 zfcp_erp_unit_failed(unit, 34, req);
1739 /* fall through */
1740 case FSF_INVALID_COMMAND_OPTION:
1741 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1742 break;
1743 case FSF_ADAPTER_STATUS_AVAILABLE:
1744 switch (header->fsf_status_qual.word[0]) {
1745 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1746 zfcp_test_link(unit->port);
1747 /* fall through */
1748 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1749 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1750 break;
1751 }
1752 break;
1753
1754 case FSF_GOOD:
1755 unit->handle = header->lun_handle;
1756 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
1757
1758 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) &&
1759 (adapter->adapter_features & FSF_FEATURE_LUN_SHARING) &&
1760 (adapter->ccw_device->id.dev_model != ZFCP_DEVICE_MODEL_PRIV)) {
1761 exclusive = (bottom->lun_access_info &
1762 FSF_UNIT_ACCESS_EXCLUSIVE);
1763 readwrite = (bottom->lun_access_info &
1764 FSF_UNIT_ACCESS_OUTBOUND_TRANSFER);
1765
1766 if (!exclusive)
1767 atomic_set_mask(ZFCP_STATUS_UNIT_SHARED,
1768 &unit->status);
1769
1770 if (!readwrite) {
1771 atomic_set_mask(ZFCP_STATUS_UNIT_READONLY,
1772 &unit->status);
1773 dev_info(&adapter->ccw_device->dev,
1774 "SCSI device at LUN 0x%016Lx on port "
1775 "0x%016Lx opened read-only\n",
1776 unit->fcp_lun, unit->port->wwpn);
1777 }
1778
1779 if (exclusive && !readwrite) {
1780 dev_err(&adapter->ccw_device->dev,
1781 "Exclusive read-only access not "
1782 "supported (unit 0x%016Lx, "
1783 "port 0x%016Lx)\n",
1784 unit->fcp_lun, unit->port->wwpn);
1785 zfcp_erp_unit_failed(unit, 35, req);
1786 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1787 zfcp_erp_unit_shutdown(unit, 0, 80, req);
1788 } else if (!exclusive && readwrite) {
1789 dev_err(&adapter->ccw_device->dev,
1790 "Shared read-write access not "
1791 "supported (unit 0x%016Lx, port "
1792 "0x%016Lx\n)",
1793 unit->fcp_lun, unit->port->wwpn);
1794 zfcp_erp_unit_failed(unit, 36, req);
1795 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1796 zfcp_erp_unit_shutdown(unit, 0, 81, req);
1797 }
1798 }
1799 break;
1800 }
1801
1802 skip_fsfstatus:
1803 atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &unit->status);
1804 }
1805
1806 /**
1807 * zfcp_fsf_open_unit - open unit
1808 * @erp_action: pointer to struct zfcp_erp_action
1809 * Returns: 0 on success, error otherwise
1810 */
1811 int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
1812 {
1813 volatile struct qdio_buffer_element *sbale;
1814 struct zfcp_adapter *adapter = erp_action->adapter;
1815 struct zfcp_fsf_req *req;
1816 int retval = -EIO;
1817
1818 spin_lock_bh(&adapter->req_q.lock);
1819 if (zfcp_fsf_req_sbal_get(adapter))
1820 goto out;
1821
1822 req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_LUN,
1823 ZFCP_REQ_AUTO_CLEANUP,
1824 adapter->pool.fsf_req_erp);
1825 if (IS_ERR(req)) {
1826 retval = PTR_ERR(req);
1827 goto out;
1828 }
1829
1830 sbale = zfcp_qdio_sbale_req(req);
1831 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1832 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1833
1834 req->qtcb->header.port_handle = erp_action->port->handle;
1835 req->qtcb->bottom.support.fcp_lun = erp_action->unit->fcp_lun;
1836 req->handler = zfcp_fsf_open_unit_handler;
1837 req->data = erp_action->unit;
1838 req->erp_action = erp_action;
1839 erp_action->fsf_req = req;
1840
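	/* without NPIV, ask the channel not to box the LUN on open */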
1841 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
1842 req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
1843
1844 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->unit->status);
1845
1846 zfcp_fsf_start_erp_timer(req);
1847 retval = zfcp_fsf_req_send(req);
1848 if (retval) {
1849 zfcp_fsf_req_free(req);
1850 erp_action->fsf_req = NULL;
1851 }
1852 out:
1853 spin_unlock_bh(&adapter->req_q.lock);
1854 return retval;
1855 }
1856
1857 static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
1858 {
1859 struct zfcp_unit *unit = req->data;
1860
1861 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1862 goto skip_fsfstatus;
1863
1864 switch (req->qtcb->header.fsf_status) {
1865 case FSF_PORT_HANDLE_NOT_VALID:
1866 zfcp_erp_adapter_reopen(unit->port->adapter, 0, 110, req);
1867 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1868 break;
1869 case FSF_LUN_HANDLE_NOT_VALID:
1870 zfcp_erp_port_reopen(unit->port, 0, 111, req);
1871 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1872 break;
1873 case FSF_PORT_BOXED:
1874 zfcp_erp_port_boxed(unit->port, 52, req);
1875 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
1876 ZFCP_STATUS_FSFREQ_RETRY;
1877 break;
1878 case FSF_ADAPTER_STATUS_AVAILABLE:
1879 switch (req->qtcb->header.fsf_status_qual.word[0]) {
1880 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1881 zfcp_test_link(unit->port);
1882 /* fall through */
1883 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1884 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1885 break;
1886 }
1887 break;
1888 case FSF_GOOD:
1889 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
1890 break;
1891 }
1892 skip_fsfstatus:
1893 atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &unit->status);
1894 }
1895
1896 /**
1897 * zfcp_fsf_close_unit - close zfcp unit
1898  * @erp_action: pointer to struct zfcp_erp_action
1899 * Returns: 0 on success, error otherwise
1900 */
1901 int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
1902 {
1903 volatile struct qdio_buffer_element *sbale;
1904 struct zfcp_adapter *adapter = erp_action->adapter;
1905 struct zfcp_fsf_req *req;
1906 int retval = -EIO;
1907
1908 spin_lock_bh(&adapter->req_q.lock);
1909 if (zfcp_fsf_req_sbal_get(adapter))
1910 goto out;
1911 req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_LUN,
1912 ZFCP_REQ_AUTO_CLEANUP,
1913 adapter->pool.fsf_req_erp);
1914 if (IS_ERR(req)) {
1915 retval = PTR_ERR(req);
1916 goto out;
1917 }
1918
1919 sbale = zfcp_qdio_sbale_req(req);
1920 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1921 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1922
1923 req->qtcb->header.port_handle = erp_action->port->handle;
1924 req->qtcb->header.lun_handle = erp_action->unit->handle;
1925 req->handler = zfcp_fsf_close_unit_handler;
1926 req->data = erp_action->unit;
1927 req->erp_action = erp_action;
1928 erp_action->fsf_req = req;
1929 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->unit->status);
1930
1931 zfcp_fsf_start_erp_timer(req);
1932 retval = zfcp_fsf_req_send(req);
1933 if (retval) {
1934 zfcp_fsf_req_free(req);
1935 erp_action->fsf_req = NULL;
1936 }
1937 out:
1938 spin_unlock_bh(&adapter->req_q.lock);
1939 return retval;
1940 }
1941
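/* accumulate per-unit latency statistics: running sum, minimum and maximum */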
1942 static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
1943 {
1944 lat_rec->sum += lat;
1945 lat_rec->min = min(lat_rec->min, lat);
1946 lat_rec->max = max(lat_rec->max, lat);
1947 }
1948
1949 static void zfcp_fsf_req_latency(struct zfcp_fsf_req *req)
1950 {
1951 struct fsf_qual_latency_info *lat_inf;
1952 struct latency_cont *lat;
1953 struct zfcp_unit *unit = req->unit;
1954 unsigned long flags;
1955
1956 lat_inf = &req->qtcb->prefix.prot_status_qual.latency_info;
1957
1958 switch (req->qtcb->bottom.io.data_direction) {
1959 case FSF_DATADIR_READ:
1960 lat = &unit->latencies.read;
1961 break;
1962 case FSF_DATADIR_WRITE:
1963 lat = &unit->latencies.write;
1964 break;
1965 case FSF_DATADIR_CMND:
1966 lat = &unit->latencies.cmd;
1967 break;
1968 default:
1969 return;
1970 }
1971
1972 spin_lock_irqsave(&unit->latencies.lock, flags);
1973 zfcp_fsf_update_lat(&lat->channel, lat_inf->channel_lat);
1974 zfcp_fsf_update_lat(&lat->fabric, lat_inf->fabric_lat);
1975 lat->counter++;
1976 spin_unlock_irqrestore(&unit->latencies.lock, flags);
1977 }
1978
1979 static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
1980 {
1981 struct scsi_cmnd *scpnt = req->data;
1982 struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
1983 &(req->qtcb->bottom.io.fcp_rsp);
1984 u32 sns_len;
1985 	char *fcp_rsp_info = (char *) &fcp_rsp_iu[1];
1986 unsigned long flags;
1987
1988 if (unlikely(!scpnt))
1989 return;
1990
1991 read_lock_irqsave(&req->adapter->abort_lock, flags);
1992
1993 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ABORTED)) {
1994 set_host_byte(scpnt, DID_SOFT_ERROR);
1995 set_driver_byte(scpnt, SUGGEST_RETRY);
1996 goto skip_fsfstatus;
1997 }
1998
1999 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
2000 set_host_byte(scpnt, DID_ERROR);
2001 goto skip_fsfstatus;
2002 }
2003
2004 set_msg_byte(scpnt, COMMAND_COMPLETE);
2005
2006 scpnt->result |= fcp_rsp_iu->scsi_status;
2007
2008 if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA)
2009 zfcp_fsf_req_latency(req);
2010
2011 if (unlikely(fcp_rsp_iu->validity.bits.fcp_rsp_len_valid)) {
2012 if (fcp_rsp_info[3] == RSP_CODE_GOOD)
2013 set_host_byte(scpnt, DID_OK);
2014 else {
2015 set_host_byte(scpnt, DID_ERROR);
2016 goto skip_fsfstatus;
2017 }
2018 }
2019
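	/* copy sense data, limited to what fits into the SCSI sense buffer */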
2020 if (unlikely(fcp_rsp_iu->validity.bits.fcp_sns_len_valid)) {
2021 sns_len = FSF_FCP_RSP_SIZE - sizeof(struct fcp_rsp_iu) +
2022 fcp_rsp_iu->fcp_rsp_len;
2023 sns_len = min(sns_len, (u32) SCSI_SENSE_BUFFERSIZE);
2024 sns_len = min(sns_len, fcp_rsp_iu->fcp_sns_len);
2025
2026 memcpy(scpnt->sense_buffer,
2027 zfcp_get_fcp_sns_info_ptr(fcp_rsp_iu), sns_len);
2028 }
2029
2030 if (unlikely(fcp_rsp_iu->validity.bits.fcp_resid_under)) {
2031 scsi_set_resid(scpnt, fcp_rsp_iu->fcp_resid);
2032 if (scsi_bufflen(scpnt) - scsi_get_resid(scpnt) <
2033 scpnt->underflow)
2034 set_host_byte(scpnt, DID_ERROR);
2035 }
2036 skip_fsfstatus:
2037 if (scpnt->result != 0)
2038 zfcp_scsi_dbf_event_result("erro", 3, req->adapter, scpnt, req);
2039 else if (scpnt->retries > 0)
2040 zfcp_scsi_dbf_event_result("retr", 4, req->adapter, scpnt, req);
2041 else
2042 zfcp_scsi_dbf_event_result("norm", 6, req->adapter, scpnt, req);
2043
2044 scpnt->host_scribble = NULL;
2045 (scpnt->scsi_done) (scpnt);
2046 /*
2047 * We must hold this lock until scsi_done has been called.
2048 	 * Otherwise we may call scsi_done after the abort for this
2049 	 * command has already completed.
2050 * Note: scsi_done must not block!
2051 */
2052 read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2053 }
2054
2055 static void zfcp_fsf_send_fcp_ctm_handler(struct zfcp_fsf_req *req)
2056 {
2057 struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
2058 &(req->qtcb->bottom.io.fcp_rsp);
2059 	char *fcp_rsp_info = (char *) &fcp_rsp_iu[1];
2060
2061 if ((fcp_rsp_info[3] != RSP_CODE_GOOD) ||
2062 (req->status & ZFCP_STATUS_FSFREQ_ERROR))
2063 req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
2064 }
2065
2066
2067 static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req)
2068 {
2069 struct zfcp_unit *unit;
2070 struct fsf_qtcb_header *header = &req->qtcb->header;
2071
2072 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT))
2073 unit = req->data;
2074 else
2075 unit = req->unit;
2076
2077 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
2078 goto skip_fsfstatus;
2079
2080 switch (header->fsf_status) {
2081 case FSF_HANDLE_MISMATCH:
2082 case FSF_PORT_HANDLE_NOT_VALID:
2083 zfcp_erp_adapter_reopen(unit->port->adapter, 0, 112, req);
2084 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2085 break;
2086 case FSF_FCPLUN_NOT_VALID:
2087 case FSF_LUN_HANDLE_NOT_VALID:
2088 zfcp_erp_port_reopen(unit->port, 0, 113, req);
2089 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2090 break;
2091 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
2092 zfcp_fsf_class_not_supp(req);
2093 break;
2094 case FSF_ACCESS_DENIED:
2095 zfcp_fsf_access_denied_unit(req, unit);
2096 break;
2097 case FSF_DIRECTION_INDICATOR_NOT_VALID:
2098 dev_err(&req->adapter->ccw_device->dev,
2099 "Incorrect direction %d, unit 0x%016Lx on port "
2100 "0x%016Lx closed\n",
2101 req->qtcb->bottom.io.data_direction,
2102 unit->fcp_lun, unit->port->wwpn);
2103 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 133, req);
2104 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2105 break;
2106 case FSF_CMND_LENGTH_NOT_VALID:
2107 dev_err(&req->adapter->ccw_device->dev,
2108 "Incorrect CDB length %d, unit 0x%016Lx on "
2109 "port 0x%016Lx closed\n",
2110 req->qtcb->bottom.io.fcp_cmnd_length,
2111 unit->fcp_lun, unit->port->wwpn);
2112 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 134, req);
2113 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2114 break;
2115 case FSF_PORT_BOXED:
2116 zfcp_erp_port_boxed(unit->port, 53, req);
2117 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
2118 ZFCP_STATUS_FSFREQ_RETRY;
2119 break;
2120 case FSF_LUN_BOXED:
2121 zfcp_erp_unit_boxed(unit, 54, req);
2122 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
2123 ZFCP_STATUS_FSFREQ_RETRY;
2124 break;
2125 case FSF_ADAPTER_STATUS_AVAILABLE:
2126 if (header->fsf_status_qual.word[0] ==
2127 FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
2128 zfcp_test_link(unit->port);
2129 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2130 break;
2131 }
2132 skip_fsfstatus:
2133 if (req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
2134 zfcp_fsf_send_fcp_ctm_handler(req);
2135 else {
2136 zfcp_fsf_send_fcp_command_task_handler(req);
2137 req->unit = NULL;
2138 zfcp_unit_put(unit);
2139 }
2140 }
2141
2142 /**
2143 * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command)
2144 * @adapter: adapter where scsi command is issued
2145 * @unit: unit where command is sent to
2146  * @unit: unit the command is sent to
2147  * @use_timer: if set, start the default FSF request timer for this request
2148 * @req_flags: flags for fsf_request
2149 */
2150 int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
2151 struct zfcp_unit *unit,
2152 struct scsi_cmnd *scsi_cmnd,
2153 int use_timer, int req_flags)
2154 {
2155 struct zfcp_fsf_req *req;
2156 struct fcp_cmnd_iu *fcp_cmnd_iu;
2157 unsigned int sbtype;
2158 int real_bytes, retval = -EIO;
2159
2160 if (unlikely(!(atomic_read(&unit->status) &
2161 ZFCP_STATUS_COMMON_UNBLOCKED)))
2162 return -EBUSY;
2163
2164 spin_lock(&adapter->req_q.lock);
2165 if (!zfcp_fsf_sbal_available(adapter))
2166 goto out;
2167 req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags,
2168 adapter->pool.fsf_req_scsi);
2169 if (IS_ERR(req)) {
2170 retval = PTR_ERR(req);
2171 goto out;
2172 }
2173
2174 zfcp_unit_get(unit);
2175 req->unit = unit;
2176 req->data = scsi_cmnd;
2177 req->handler = zfcp_fsf_send_fcp_command_handler;
2178 req->qtcb->header.lun_handle = unit->handle;
2179 req->qtcb->header.port_handle = unit->port->handle;
2180 req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2181
2182 scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
2183
2184 fcp_cmnd_iu = (struct fcp_cmnd_iu *) &(req->qtcb->bottom.io.fcp_cmnd);
2185 fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
2186 /*
2187 * set depending on data direction:
2188 * data direction bits in SBALE (SB Type)
2189 * data direction bits in QTCB
2190 * data direction bits in FCP_CMND IU
2191 */
2192 switch (scsi_cmnd->sc_data_direction) {
2193 case DMA_NONE:
2194 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2195 sbtype = SBAL_FLAGS0_TYPE_READ;
2196 break;
2197 case DMA_FROM_DEVICE:
2198 req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ;
2199 sbtype = SBAL_FLAGS0_TYPE_READ;
2200 fcp_cmnd_iu->rddata = 1;
2201 break;
2202 case DMA_TO_DEVICE:
2203 req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE;
2204 sbtype = SBAL_FLAGS0_TYPE_WRITE;
2205 fcp_cmnd_iu->wddata = 1;
2206 break;
2207 case DMA_BIDIRECTIONAL:
2208 default:
2209 retval = -EIO;
2210 goto failed_scsi_cmnd;
2211 }
2212
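	/*
	 * Use a simple queue tag if the device does tagged queueing, or if
	 * the LUN is shared read-only; otherwise send the command untagged.
	 */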
2213 if (likely((scsi_cmnd->device->simple_tags) ||
2214 ((atomic_read(&unit->status) & ZFCP_STATUS_UNIT_READONLY) &&
2215 (atomic_read(&unit->status) & ZFCP_STATUS_UNIT_SHARED))))
2216 fcp_cmnd_iu->task_attribute = SIMPLE_Q;
2217 else
2218 fcp_cmnd_iu->task_attribute = UNTAGGED;
2219
2220 if (unlikely(scsi_cmnd->cmd_len > FCP_CDB_LENGTH))
2221 fcp_cmnd_iu->add_fcp_cdb_length =
2222 (scsi_cmnd->cmd_len - FCP_CDB_LENGTH) >> 2;
2223
2224 memcpy(fcp_cmnd_iu->fcp_cdb, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
2225
2226 req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) +
2227 fcp_cmnd_iu->add_fcp_cdb_length + sizeof(fcp_dl_t);
2228
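	/*
	 * Map the command's scatter-gather list onto SBALs; a negative
	 * result means the data did not fit into a single FSF request.
	 */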
2229 real_bytes = zfcp_qdio_sbals_from_sg(req, sbtype,
2230 scsi_sglist(scsi_cmnd),
2231 FSF_MAX_SBALS_PER_REQ);
2232 if (unlikely(real_bytes < 0)) {
2233 if (req->sbal_number < FSF_MAX_SBALS_PER_REQ)
2234 retval = -EIO;
2235 else {
2236 dev_err(&adapter->ccw_device->dev,
2237 "Oversize data package, unit 0x%016Lx "
2238 "on port 0x%016Lx closed\n",
2239 unit->fcp_lun, unit->port->wwpn);
2240 zfcp_erp_unit_shutdown(unit, 0, 131, req);
2241 retval = -EINVAL;
2242 }
2243 goto failed_scsi_cmnd;
2244 }
2245
2246 zfcp_set_fcp_dl(fcp_cmnd_iu, real_bytes);
2247
2248 if (use_timer)
2249 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
2250
2251 retval = zfcp_fsf_req_send(req);
2252 if (unlikely(retval))
2253 goto failed_scsi_cmnd;
2254
2255 goto out;
2256
2257 failed_scsi_cmnd:
2258 zfcp_unit_put(unit);
2259 zfcp_fsf_req_free(req);
2260 scsi_cmnd->host_scribble = NULL;
2261 out:
2262 spin_unlock(&adapter->req_q.lock);
2263 return retval;
2264 }
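
/*
 * Illustrative caller sketch (not part of this file): the SCSI midlayer
 * entry point (queuecommand) would hand a command over roughly like this,
 * with adapter and unit looked up from the scsi_device beforehand; the
 * flag choices shown are assumptions for illustration only.
 *
 *	retval = zfcp_fsf_send_fcp_command_task(adapter, unit, scpnt,
 *						0, ZFCP_REQ_AUTO_CLEANUP);
 *	if (retval == -EBUSY)
 *		return SCSI_MLQUEUE_DEVICE_BUSY;
 */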
2265
2266 /**
2267 * zfcp_fsf_send_fcp_ctm - send SCSI task management command
2268  * @adapter: pointer to struct zfcp_adapter
2269 * @unit: pointer to struct zfcp_unit
2270 * @tm_flags: unsigned byte for task management flags
2271 * @req_flags: int request flags
2272  * Returns: pointer to struct zfcp_fsf_req on success, NULL otherwise
2273 */
2274 struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *adapter,
2275 struct zfcp_unit *unit,
2276 u8 tm_flags, int req_flags)
2277 {
2278 volatile struct qdio_buffer_element *sbale;
2279 struct zfcp_fsf_req *req = NULL;
2280 struct fcp_cmnd_iu *fcp_cmnd_iu;
2281
2282 if (unlikely(!(atomic_read(&unit->status) &
2283 ZFCP_STATUS_COMMON_UNBLOCKED)))
2284 return NULL;
2285
2286 spin_lock(&adapter->req_q.lock);
2287 if (!zfcp_fsf_sbal_available(adapter))
2288 goto out;
2289 req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags,
2290 adapter->pool.fsf_req_scsi);
2291 if (IS_ERR(req))
2292 goto out;
2293
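	/* this request carries an FCP task management function, not I/O */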
2294 req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT;
2295 req->data = unit;
2296 req->handler = zfcp_fsf_send_fcp_command_handler;
2297 req->qtcb->header.lun_handle = unit->handle;
2298 req->qtcb->header.port_handle = unit->port->handle;
2299 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2300 req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2301 req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) +
2302 sizeof(fcp_dl_t);
2303
2304 sbale = zfcp_qdio_sbale_req(req);
2305 sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
2306 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2307
2308 fcp_cmnd_iu = (struct fcp_cmnd_iu *) &req->qtcb->bottom.io.fcp_cmnd;
2309 fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
2310 fcp_cmnd_iu->task_management_flags = tm_flags;
2311
2312 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
2313 if (!zfcp_fsf_req_send(req))
2314 goto out;
2315
2316 zfcp_fsf_req_free(req);
2317 req = NULL;
2318 out:
2319 spin_unlock(&adapter->req_q.lock);
2320 return req;
2321 }
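
/*
 * Illustrative caller sketch (not part of this file): a SCSI error handler
 * would typically send a task management function and wait for the request
 * to complete, roughly as below; tm_flags stands for whatever FCP task
 * management bit (for example a logical unit reset) the caller wants to use.
 *
 *	req = zfcp_fsf_send_fcp_ctm(adapter, unit, tm_flags, 0);
 *	if (!req)
 *		return FAILED;
 *	wait_event(req->completion_wq,
 *		   req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
 *	retval = (req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) ?
 *		 FAILED : SUCCESS;
 *	zfcp_fsf_req_free(req);
 *	return retval;
 */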
2322
2323 static void zfcp_fsf_control_file_handler(struct zfcp_fsf_req *req)
2324 {
2325 if (req->qtcb->header.fsf_status != FSF_GOOD)
2326 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2327 }
2328
2329 /**
2330 * zfcp_fsf_control_file - control file upload/download
2331 * @adapter: pointer to struct zfcp_adapter
2332 * @fsf_cfdc: pointer to struct zfcp_fsf_cfdc
2333  * Returns: pointer to struct zfcp_fsf_req on success, ERR_PTR() otherwise
2334 */
2335 struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2336 struct zfcp_fsf_cfdc *fsf_cfdc)
2337 {
2338 volatile struct qdio_buffer_element *sbale;
2339 struct zfcp_fsf_req *req = NULL;
2340 struct fsf_qtcb_bottom_support *bottom;
2341 int direction, retval = -EIO, bytes;
2342
2343 if (!(adapter->adapter_features & FSF_FEATURE_CFDC))
2344 return ERR_PTR(-EOPNOTSUPP);
2345
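	/* the SBAL data direction follows download versus upload */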
2346 switch (fsf_cfdc->command) {
2347 case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
2348 direction = SBAL_FLAGS0_TYPE_WRITE;
2349 break;
2350 case FSF_QTCB_UPLOAD_CONTROL_FILE:
2351 direction = SBAL_FLAGS0_TYPE_READ;
2352 break;
2353 default:
2354 return ERR_PTR(-EINVAL);
2355 }
2356
2357 spin_lock_bh(&adapter->req_q.lock);
2358 if (zfcp_fsf_req_sbal_get(adapter))
2359 goto out;
2360
2361 req = zfcp_fsf_req_create(adapter, fsf_cfdc->command, 0, NULL);
2362 if (IS_ERR(req)) {
2363 retval = -EPERM;
2364 goto out;
2365 }
2366
2367 req->handler = zfcp_fsf_control_file_handler;
2368
2369 sbale = zfcp_qdio_sbale_req(req);
2370 sbale[0].flags |= direction;
2371
2372 bottom = &req->qtcb->bottom.support;
2373 bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
2374 bottom->option = fsf_cfdc->option;
2375
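	/*
	 * The control file must map to exactly ZFCP_CFDC_MAX_SIZE bytes;
	 * otherwise the request is dropped with -ENOMEM.
	 */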
2376 bytes = zfcp_qdio_sbals_from_sg(req, direction, fsf_cfdc->sg,
2377 FSF_MAX_SBALS_PER_REQ);
2378 if (bytes != ZFCP_CFDC_MAX_SIZE) {
2379 retval = -ENOMEM;
2380 zfcp_fsf_req_free(req);
2381 goto out;
2382 }
2383
2384 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
2385 retval = zfcp_fsf_req_send(req);
2386 out:
2387 spin_unlock_bh(&adapter->req_q.lock);
2388
2389 if (!retval) {
2390 wait_event(req->completion_wq,
2391 req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
2392 return req;
2393 }
2394 return ERR_PTR(retval);
2395 }