1 /*
2 * zfcp device driver
3 *
4 * Implementation of FSF commands.
5 *
6 * Copyright IBM Corporation 2002, 2009
7 */
8
9 #define KMSG_COMPONENT "zfcp"
10 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11
12 #include <linux/blktrace_api.h>
13 #include "zfcp_ext.h"
14 #include "zfcp_dbf.h"
15
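/* request timeout: the FSF request was not answered in time, recover the adapter */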
16 static void zfcp_fsf_request_timeout_handler(unsigned long data)
17 {
18 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
19 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
20 "fsrth_1", NULL);
21 }
22
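/* arm the per-request timer; on expiry the adapter is reopened by the handler above */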
23 static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
24 unsigned long timeout)
25 {
26 fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
27 fsf_req->timer.data = (unsigned long) fsf_req->adapter;
28 fsf_req->timer.expires = jiffies + timeout;
29 add_timer(&fsf_req->timer);
30 }
31
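/* arm a fixed 30 second timeout for requests issued on behalf of error recovery (ERP) */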
32 static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
33 {
34 BUG_ON(!fsf_req->erp_action);
35 fsf_req->timer.function = zfcp_erp_timeout_handler;
36 fsf_req->timer.data = (unsigned long) fsf_req->erp_action;
37 fsf_req->timer.expires = jiffies + 30 * HZ;
38 add_timer(&fsf_req->timer);
39 }
40
41 /* association between FSF command and FSF QTCB type */
42 static u32 fsf_qtcb_type[] = {
43 [FSF_QTCB_FCP_CMND] = FSF_IO_COMMAND,
44 [FSF_QTCB_ABORT_FCP_CMND] = FSF_SUPPORT_COMMAND,
45 [FSF_QTCB_OPEN_PORT_WITH_DID] = FSF_SUPPORT_COMMAND,
46 [FSF_QTCB_OPEN_LUN] = FSF_SUPPORT_COMMAND,
47 [FSF_QTCB_CLOSE_LUN] = FSF_SUPPORT_COMMAND,
48 [FSF_QTCB_CLOSE_PORT] = FSF_SUPPORT_COMMAND,
49 [FSF_QTCB_CLOSE_PHYSICAL_PORT] = FSF_SUPPORT_COMMAND,
50 [FSF_QTCB_SEND_ELS] = FSF_SUPPORT_COMMAND,
51 [FSF_QTCB_SEND_GENERIC] = FSF_SUPPORT_COMMAND,
52 [FSF_QTCB_EXCHANGE_CONFIG_DATA] = FSF_CONFIG_COMMAND,
53 [FSF_QTCB_EXCHANGE_PORT_DATA] = FSF_PORT_COMMAND,
54 [FSF_QTCB_DOWNLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND,
55 [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND
56 };
57
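/* decode an access control table (ACT) status qualifier and report the matching rule */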
58 static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table)
59 {
60 u16 subtable = table >> 16;
61 u16 rule = table & 0xffff;
62 const char *act_type[] = { "unknown", "OS", "WWPN", "DID", "LUN" };
63
64 if (subtable && subtable < ARRAY_SIZE(act_type))
65 dev_warn(&adapter->ccw_device->dev,
66 "Access denied according to ACT rule type %s, "
67 "rule %d\n", act_type[subtable], rule);
68 }
69
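/* handle access denied for a remote port: report the ACT rules and flag the port as denied */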
70 static void zfcp_fsf_access_denied_port(struct zfcp_fsf_req *req,
71 struct zfcp_port *port)
72 {
73 struct fsf_qtcb_header *header = &req->qtcb->header;
74 dev_warn(&req->adapter->ccw_device->dev,
75 "Access denied to port 0x%016Lx\n",
76 (unsigned long long)port->wwpn);
77 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
78 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
79 zfcp_erp_port_access_denied(port, "fspad_1", req);
80 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
81 }
82
83 static void zfcp_fsf_access_denied_unit(struct zfcp_fsf_req *req,
84 struct zfcp_unit *unit)
85 {
86 struct fsf_qtcb_header *header = &req->qtcb->header;
87 dev_warn(&req->adapter->ccw_device->dev,
88 "Access denied to unit 0x%016Lx on port 0x%016Lx\n",
89 (unsigned long long)unit->fcp_lun,
90 (unsigned long long)unit->port->wwpn);
91 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
92 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
93 zfcp_erp_unit_access_denied(unit, "fsuad_1", req);
94 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
95 }
96
97 static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
98 {
99 dev_err(&req->adapter->ccw_device->dev, "FCP device not "
100 "operational because of an unsupported FC class\n");
101 zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1", req);
102 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
103 }
104
105 /**
106 * zfcp_fsf_req_free - free memory used by fsf request
107  * @req: pointer to struct zfcp_fsf_req
108 */
109 void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
110 {
111 if (likely(req->pool)) {
112 if (likely(req->qtcb))
113 mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
114 mempool_free(req, req->pool);
115 return;
116 }
117
118 if (likely(req->qtcb))
119 kmem_cache_free(zfcp_data.qtcb_cache, req->qtcb);
120 kfree(req);
121 }
122
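/* unsolicited status: a remote port was closed, reopen the matching zfcp_port */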
123 static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
124 {
125 struct fsf_status_read_buffer *sr_buf = req->data;
126 struct zfcp_adapter *adapter = req->adapter;
127 struct zfcp_port *port;
128 int d_id = sr_buf->d_id & ZFCP_DID_MASK;
129 unsigned long flags;
130
131 read_lock_irqsave(&zfcp_data.config_lock, flags);
132 list_for_each_entry(port, &adapter->port_list_head, list)
133 if (port->d_id == d_id) {
134 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
135 zfcp_erp_port_reopen(port, 0, "fssrpc1", req);
136 return;
137 }
138 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
139 }
140
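/* evaluate link-down information: block rports, report the cause, mark the adapter failed */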
141 static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, char *id,
142 struct fsf_link_down_info *link_down)
143 {
144 struct zfcp_adapter *adapter = req->adapter;
145 unsigned long flags;
146
147 if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
148 return;
149
150 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
151
152 read_lock_irqsave(&zfcp_data.config_lock, flags);
153 zfcp_scsi_schedule_rports_block(adapter);
154 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
155
156 if (!link_down)
157 goto out;
158
159 switch (link_down->error_code) {
160 case FSF_PSQ_LINK_NO_LIGHT:
161 dev_warn(&req->adapter->ccw_device->dev,
162 "There is no light signal from the local "
163 "fibre channel cable\n");
164 break;
165 case FSF_PSQ_LINK_WRAP_PLUG:
166 dev_warn(&req->adapter->ccw_device->dev,
167 "There is a wrap plug instead of a fibre "
168 "channel cable\n");
169 break;
170 case FSF_PSQ_LINK_NO_FCP:
171 dev_warn(&req->adapter->ccw_device->dev,
172 "The adjacent fibre channel node does not "
173 "support FCP\n");
174 break;
175 case FSF_PSQ_LINK_FIRMWARE_UPDATE:
176 dev_warn(&req->adapter->ccw_device->dev,
177 "The FCP device is suspended because of a "
178 "firmware update\n");
179 break;
180 case FSF_PSQ_LINK_INVALID_WWPN:
181 dev_warn(&req->adapter->ccw_device->dev,
182 "The FCP device detected a WWPN that is "
183 "duplicate or not valid\n");
184 break;
185 case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
186 dev_warn(&req->adapter->ccw_device->dev,
187 "The fibre channel fabric does not support NPIV\n");
188 break;
189 case FSF_PSQ_LINK_NO_FCP_RESOURCES:
190 dev_warn(&req->adapter->ccw_device->dev,
191 "The FCP adapter cannot support more NPIV ports\n");
192 break;
193 case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
194 dev_warn(&req->adapter->ccw_device->dev,
195 "The adjacent switch cannot support "
196 "more NPIV ports\n");
197 break;
198 case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
199 dev_warn(&req->adapter->ccw_device->dev,
200 "The FCP adapter could not log in to the "
201 "fibre channel fabric\n");
202 break;
203 case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
204 dev_warn(&req->adapter->ccw_device->dev,
205 "The WWPN assignment file on the FCP adapter "
206 "has been damaged\n");
207 break;
208 case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
209 dev_warn(&req->adapter->ccw_device->dev,
210 "The mode table on the FCP adapter "
211 "has been damaged\n");
212 break;
213 case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
214 dev_warn(&req->adapter->ccw_device->dev,
215 "All NPIV ports on the FCP adapter have "
216 "been assigned\n");
217 break;
218 default:
219 dev_warn(&req->adapter->ccw_device->dev,
220 "The link between the FCP adapter and "
221 "the FC fabric is down\n");
222 }
223 out:
224 zfcp_erp_adapter_failed(adapter, id, req);
225 }
226
227 static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
228 {
229 struct fsf_status_read_buffer *sr_buf = req->data;
230 struct fsf_link_down_info *ldi =
231 (struct fsf_link_down_info *) &sr_buf->payload;
232
233 switch (sr_buf->status_subtype) {
234 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
235 zfcp_fsf_link_down_info_eval(req, "fssrld1", ldi);
236 break;
237 case FSF_STATUS_READ_SUB_FDISC_FAILED:
238 zfcp_fsf_link_down_info_eval(req, "fssrld2", ldi);
239 break;
240 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
241 zfcp_fsf_link_down_info_eval(req, "fssrld3", NULL);
242 	}
243 }
244
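/* handler for unsolicited status notifications read from the FCP adapter */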
245 static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
246 {
247 struct zfcp_adapter *adapter = req->adapter;
248 struct fsf_status_read_buffer *sr_buf = req->data;
249
250 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
251 zfcp_hba_dbf_event_fsf_unsol("dism", adapter, sr_buf);
252 mempool_free(sr_buf, adapter->pool.status_read_data);
253 zfcp_fsf_req_free(req);
254 return;
255 }
256
257 zfcp_hba_dbf_event_fsf_unsol("read", adapter, sr_buf);
258
259 switch (sr_buf->status_type) {
260 case FSF_STATUS_READ_PORT_CLOSED:
261 zfcp_fsf_status_read_port_closed(req);
262 break;
263 case FSF_STATUS_READ_INCOMING_ELS:
264 zfcp_fc_incoming_els(req);
265 break;
266 case FSF_STATUS_READ_SENSE_DATA_AVAIL:
267 break;
268 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
269 dev_warn(&adapter->ccw_device->dev,
270 "The error threshold for checksum statistics "
271 "has been exceeded\n");
272 zfcp_hba_dbf_event_berr(adapter, req);
273 break;
274 case FSF_STATUS_READ_LINK_DOWN:
275 zfcp_fsf_status_read_link_down(req);
276 break;
277 case FSF_STATUS_READ_LINK_UP:
278 dev_info(&adapter->ccw_device->dev,
279 "The local link has been restored\n");
280 /* All ports should be marked as ready to run again */
281 zfcp_erp_modify_adapter_status(adapter, "fssrh_1", NULL,
282 ZFCP_STATUS_COMMON_RUNNING,
283 ZFCP_SET);
284 zfcp_erp_adapter_reopen(adapter,
285 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
286 ZFCP_STATUS_COMMON_ERP_FAILED,
287 "fssrh_2", req);
288 break;
289 case FSF_STATUS_READ_NOTIFICATION_LOST:
290 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED)
291 zfcp_erp_adapter_access_changed(adapter, "fssrh_3",
292 req);
293 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
294 schedule_work(&adapter->scan_work);
295 break;
296 case FSF_STATUS_READ_CFDC_UPDATED:
297 zfcp_erp_adapter_access_changed(adapter, "fssrh_4", req);
298 break;
299 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
300 adapter->adapter_features = sr_buf->payload.word[0];
301 break;
302 }
303
304 mempool_free(sr_buf, adapter->pool.status_read_data);
305 zfcp_fsf_req_free(req);
306
307 atomic_inc(&adapter->stat_miss);
308 queue_work(adapter->work_queue, &adapter->stat_work);
309 }
310
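/* evaluate the FSF status qualifier and set the matching request status flags */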
311 static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
312 {
313 switch (req->qtcb->header.fsf_status_qual.word[0]) {
314 case FSF_SQ_FCP_RSP_AVAILABLE:
315 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
316 case FSF_SQ_NO_RETRY_POSSIBLE:
317 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
318 return;
319 case FSF_SQ_COMMAND_ABORTED:
320 req->status |= ZFCP_STATUS_FSFREQ_ABORTED;
321 break;
322 case FSF_SQ_NO_RECOM:
323 dev_err(&req->adapter->ccw_device->dev,
324 "The FCP adapter reported a problem "
325 "that cannot be recovered\n");
326 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1", req);
327 break;
328 }
329 	/* all non-return cases set FSFREQ_ERROR */
330 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
331 }
332
333 static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
334 {
335 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
336 return;
337
338 switch (req->qtcb->header.fsf_status) {
339 case FSF_UNKNOWN_COMMAND:
340 dev_err(&req->adapter->ccw_device->dev,
341 "The FCP adapter does not recognize the command 0x%x\n",
342 req->qtcb->header.fsf_command);
343 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1", req);
344 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
345 break;
346 case FSF_ADAPTER_STATUS_AVAILABLE:
347 zfcp_fsf_fsfstatus_qual_eval(req);
348 break;
349 }
350 }
351
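/* evaluate the QTCB protocol status and trigger recovery on protocol errors */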
352 static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
353 {
354 struct zfcp_adapter *adapter = req->adapter;
355 struct fsf_qtcb *qtcb = req->qtcb;
356 union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual;
357
358 zfcp_hba_dbf_event_fsf_response(req);
359
360 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
361 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
362 ZFCP_STATUS_FSFREQ_RETRY; /* only for SCSI cmnds. */
363 return;
364 }
365
366 switch (qtcb->prefix.prot_status) {
367 case FSF_PROT_GOOD:
368 case FSF_PROT_FSF_STATUS_PRESENTED:
369 return;
370 case FSF_PROT_QTCB_VERSION_ERROR:
371 dev_err(&adapter->ccw_device->dev,
372 "QTCB version 0x%x not supported by FCP adapter "
373 "(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
374 psq->word[0], psq->word[1]);
375 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1", req);
376 break;
377 case FSF_PROT_ERROR_STATE:
378 case FSF_PROT_SEQ_NUMB_ERROR:
379 zfcp_erp_adapter_reopen(adapter, 0, "fspse_2", req);
380 req->status |= ZFCP_STATUS_FSFREQ_RETRY;
381 break;
382 case FSF_PROT_UNSUPP_QTCB_TYPE:
383 dev_err(&adapter->ccw_device->dev,
384 "The QTCB type is not supported by the FCP adapter\n");
385 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3", req);
386 break;
387 case FSF_PROT_HOST_CONNECTION_INITIALIZING:
388 atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
389 &adapter->status);
390 break;
391 case FSF_PROT_DUPLICATE_REQUEST_ID:
392 dev_err(&adapter->ccw_device->dev,
393 "0x%Lx is an ambiguous request identifier\n",
394 (unsigned long long)qtcb->bottom.support.req_handle);
395 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4", req);
396 break;
397 case FSF_PROT_LINK_DOWN:
398 zfcp_fsf_link_down_info_eval(req, "fspse_5",
399 &psq->link_down_info);
400 /* FIXME: reopening adapter now? better wait for link up */
401 zfcp_erp_adapter_reopen(adapter, 0, "fspse_6", req);
402 break;
403 case FSF_PROT_REEST_QUEUE:
404 /* All ports should be marked as ready to run again */
405 zfcp_erp_modify_adapter_status(adapter, "fspse_7", NULL,
406 ZFCP_STATUS_COMMON_RUNNING,
407 ZFCP_SET);
408 zfcp_erp_adapter_reopen(adapter,
409 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
410 ZFCP_STATUS_COMMON_ERP_FAILED,
411 "fspse_8", req);
412 break;
413 default:
414 dev_err(&adapter->ccw_device->dev,
415 "0x%x is not a valid transfer protocol status\n",
416 qtcb->prefix.prot_status);
417 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9", req);
418 }
419 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
420 }
421
422 /**
423  * zfcp_fsf_req_complete - process completion of an FSF request
424  * @req: the FSF request that has been completed
425  *
426  * When a request has been completed, either by the FCP adapter or
427  * because it has been dismissed due to a queue shutdown, this function
428  * is called to process the completion status and trigger further
429  * events related to the FSF request.
430 */
431 static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
432 {
433 if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) {
434 zfcp_fsf_status_read_handler(req);
435 return;
436 }
437
438 del_timer(&req->timer);
439 zfcp_fsf_protstatus_eval(req);
440 zfcp_fsf_fsfstatus_eval(req);
441 req->handler(req);
442
443 if (req->erp_action)
444 zfcp_erp_notify(req->erp_action, 0);
445
446 if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
447 zfcp_fsf_req_free(req);
448 else
449 complete(&req->completion);
450 }
451
452 /**
453 * zfcp_fsf_req_dismiss_all - dismiss all fsf requests
454 * @adapter: pointer to struct zfcp_adapter
455 *
456 * Never ever call this without shutting down the adapter first.
457 * Otherwise the adapter would continue using and corrupting s390 storage.
458  * A BUG_ON() call is included to ensure this precondition.
459 * ERP is supposed to be the only user of this function.
460 */
461 void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
462 {
463 struct zfcp_fsf_req *req, *tmp;
464 unsigned long flags;
465 LIST_HEAD(remove_queue);
466 unsigned int i;
467
468 BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
469 spin_lock_irqsave(&adapter->req_list_lock, flags);
470 for (i = 0; i < REQUEST_LIST_SIZE; i++)
471 list_splice_init(&adapter->req_list[i], &remove_queue);
472 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
473
474 list_for_each_entry_safe(req, tmp, &remove_queue, list) {
475 list_del(&req->list);
476 req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
477 zfcp_fsf_req_complete(req);
478 }
479 }
480
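/* copy the exchange config data into the fc_host attributes and adapter fields */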
481 static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
482 {
483 struct fsf_qtcb_bottom_config *bottom;
484 struct zfcp_adapter *adapter = req->adapter;
485 struct Scsi_Host *shost = adapter->scsi_host;
486
487 bottom = &req->qtcb->bottom.config;
488
489 if (req->data)
490 memcpy(req->data, bottom, sizeof(*bottom));
491
492 fc_host_node_name(shost) = bottom->nport_serv_param.wwnn;
493 fc_host_port_name(shost) = bottom->nport_serv_param.wwpn;
494 fc_host_port_id(shost) = bottom->s_id & ZFCP_DID_MASK;
495 fc_host_speed(shost) = bottom->fc_link_speed;
496 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
497
498 adapter->hydra_version = bottom->adapter_type;
499 adapter->timer_ticks = bottom->timer_interval;
500
501 if (fc_host_permanent_port_name(shost) == -1)
502 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
503
504 switch (bottom->fc_topology) {
505 case FSF_TOPO_P2P:
506 adapter->peer_d_id = bottom->peer_d_id & ZFCP_DID_MASK;
507 adapter->peer_wwpn = bottom->plogi_payload.wwpn;
508 adapter->peer_wwnn = bottom->plogi_payload.wwnn;
509 fc_host_port_type(shost) = FC_PORTTYPE_PTP;
510 break;
511 case FSF_TOPO_FABRIC:
512 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
513 break;
514 case FSF_TOPO_AL:
515 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
516 /* fall through */
517 default:
518 dev_err(&adapter->ccw_device->dev,
519 "Unknown or unsupported arbitrated loop "
520 "fibre channel topology detected\n");
521 zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1", req);
522 return -EIO;
523 }
524
525 return 0;
526 }
527
528 static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
529 {
530 struct zfcp_adapter *adapter = req->adapter;
531 struct fsf_qtcb *qtcb = req->qtcb;
532 struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
533 struct Scsi_Host *shost = adapter->scsi_host;
534
535 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
536 return;
537
538 adapter->fsf_lic_version = bottom->lic_version;
539 adapter->adapter_features = bottom->adapter_features;
540 adapter->connection_features = bottom->connection_features;
541 adapter->peer_wwpn = 0;
542 adapter->peer_wwnn = 0;
543 adapter->peer_d_id = 0;
544
545 switch (qtcb->header.fsf_status) {
546 case FSF_GOOD:
547 if (zfcp_fsf_exchange_config_evaluate(req))
548 return;
549
550 if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
551 dev_err(&adapter->ccw_device->dev,
552 "FCP adapter maximum QTCB size (%d bytes) "
553 "is too small\n",
554 bottom->max_qtcb_size);
555 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1", req);
556 return;
557 }
558 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
559 &adapter->status);
560 break;
561 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
562 fc_host_node_name(shost) = 0;
563 fc_host_port_name(shost) = 0;
564 fc_host_port_id(shost) = 0;
565 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
566 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
567 adapter->hydra_version = 0;
568
569 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
570 &adapter->status);
571
572 zfcp_fsf_link_down_info_eval(req, "fsecdh2",
573 &qtcb->header.fsf_status_qual.link_down_info);
574 break;
575 default:
576 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3", req);
577 return;
578 }
579
580 if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
581 adapter->hardware_version = bottom->hardware_version;
582 memcpy(fc_host_serial_number(shost), bottom->serial_number,
583 min(FC_SERIAL_NUMBER_SIZE, 17));
584 EBCASC(fc_host_serial_number(shost),
585 min(FC_SERIAL_NUMBER_SIZE, 17));
586 }
587
588 if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
589 dev_err(&adapter->ccw_device->dev,
590 "The FCP adapter only supports newer "
591 "control block versions\n");
592 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4", req);
593 return;
594 }
595 if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
596 dev_err(&adapter->ccw_device->dev,
597 "The FCP adapter only supports older "
598 "control block versions\n");
599 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5", req);
600 }
601 }
602
603 static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
604 {
605 struct zfcp_adapter *adapter = req->adapter;
606 struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
607 struct Scsi_Host *shost = adapter->scsi_host;
608
609 if (req->data)
610 memcpy(req->data, bottom, sizeof(*bottom));
611
612 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
613 fc_host_permanent_port_name(shost) = bottom->wwpn;
614 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
615 } else
616 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
617 fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
618 fc_host_supported_speeds(shost) = bottom->supported_speed;
619 }
620
621 static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
622 {
623 struct fsf_qtcb *qtcb = req->qtcb;
624
625 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
626 return;
627
628 switch (qtcb->header.fsf_status) {
629 case FSF_GOOD:
630 zfcp_fsf_exchange_port_evaluate(req);
631 break;
632 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
633 zfcp_fsf_exchange_port_evaluate(req);
634 zfcp_fsf_link_down_info_eval(req, "fsepdh1",
635 &qtcb->header.fsf_status_qual.link_down_info);
636 break;
637 }
638 }
639
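/* returns 1 with req_q_lock held if a free SBAL is available, 0 with the lock released */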
640 static int zfcp_fsf_sbal_check(struct zfcp_adapter *adapter)
641 {
642 struct zfcp_qdio_queue *req_q = &adapter->req_q;
643
644 spin_lock_bh(&adapter->req_q_lock);
645 if (atomic_read(&req_q->count))
646 return 1;
647 spin_unlock_bh(&adapter->req_q_lock);
648 return 0;
649 }
650
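/* called with req_q_lock held; drops it while waiting up to 5 seconds for a free SBAL */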
651 static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter)
652 {
653 long ret;
654
655 spin_unlock_bh(&adapter->req_q_lock);
656 ret = wait_event_interruptible_timeout(adapter->request_wq,
657 zfcp_fsf_sbal_check(adapter), 5 * HZ);
658 if (ret > 0)
659 return 0;
660 if (!ret) {
661 atomic_inc(&adapter->qdio_outb_full);
662 /* assume hanging outbound queue, try queue recovery */
663 zfcp_erp_adapter_reopen(adapter, 0, "fsrsg_1", NULL);
664 }
665
666 spin_lock_bh(&adapter->req_q_lock);
667 return -EIO;
668 }
669
670 static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
671 {
672 struct zfcp_fsf_req *req;
673
674 if (likely(pool))
675 req = mempool_alloc(pool, GFP_ATOMIC);
676 else
677 req = kmalloc(sizeof(*req), GFP_ATOMIC);
678
679 if (unlikely(!req))
680 return NULL;
681
682 memset(req, 0, sizeof(*req));
683 req->pool = pool;
684 return req;
685 }
686
687 static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
688 {
689 struct fsf_qtcb *qtcb;
690
691 if (likely(pool))
692 qtcb = mempool_alloc(pool, GFP_ATOMIC);
693 else
694 qtcb = kmem_cache_alloc(zfcp_data.qtcb_cache, GFP_ATOMIC);
695
696 if (unlikely(!qtcb))
697 return NULL;
698
699 memset(qtcb, 0, sizeof(*qtcb));
700 return qtcb;
701 }
702
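/* allocate and initialize an FSF request including its QTCB and the first SBALE */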
703 static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter,
704 u32 fsf_cmd, mempool_t *pool)
705 {
706 struct qdio_buffer_element *sbale;
707 struct zfcp_qdio_queue *req_q = &adapter->req_q;
708 struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);
709
710 if (unlikely(!req))
711 return ERR_PTR(-ENOMEM);
712
713 if (adapter->req_no == 0)
714 adapter->req_no++;
715
716 INIT_LIST_HEAD(&req->list);
717 init_timer(&req->timer);
718 init_completion(&req->completion);
719
720 req->adapter = adapter;
721 req->fsf_command = fsf_cmd;
722 req->req_id = adapter->req_no;
723 req->queue_req.sbal_number = 1;
724 req->queue_req.sbal_first = req_q->first;
725 req->queue_req.sbal_last = req_q->first;
726 req->queue_req.sbale_curr = 1;
727
728 sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req);
729 sbale[0].addr = (void *) req->req_id;
730 sbale[0].flags |= SBAL_FLAGS0_COMMAND;
731
732 if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
733 if (likely(pool))
734 req->qtcb = zfcp_qtcb_alloc(adapter->pool.qtcb_pool);
735 else
736 req->qtcb = zfcp_qtcb_alloc(NULL);
737
738 if (unlikely(!req->qtcb)) {
739 zfcp_fsf_req_free(req);
740 return ERR_PTR(-ENOMEM);
741 }
742
743 req->qtcb->prefix.req_seq_no = req->adapter->fsf_req_seq_no;
744 req->qtcb->prefix.req_id = req->req_id;
745 req->qtcb->prefix.ulp_info = 26;
746 req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command];
747 req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
748 req->qtcb->header.req_handle = req->req_id;
749 req->qtcb->header.fsf_command = req->fsf_command;
750 req->seq_no = adapter->fsf_req_seq_no;
751 req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
752 sbale[1].addr = (void *) req->qtcb;
753 sbale[1].length = sizeof(struct fsf_qtcb);
754 }
755
756 if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) {
757 zfcp_fsf_req_free(req);
758 return ERR_PTR(-EIO);
759 }
760
761 return req;
762 }
763
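/* register the request in the hash table and hand it to the QDIO outbound queue */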
764 static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
765 {
766 struct zfcp_adapter *adapter = req->adapter;
767 unsigned long flags;
768 int idx;
769 int with_qtcb = (req->qtcb != NULL);
770
771 /* put allocated FSF request into hash table */
772 spin_lock_irqsave(&adapter->req_list_lock, flags);
773 idx = zfcp_reqlist_hash(req->req_id);
774 list_add_tail(&req->list, &adapter->req_list[idx]);
775 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
776
777 req->queue_req.qdio_outb_usage = atomic_read(&adapter->req_q.count);
778 req->issued = get_clock();
779 if (zfcp_qdio_send(adapter, &req->queue_req)) {
780 del_timer(&req->timer);
781 spin_lock_irqsave(&adapter->req_list_lock, flags);
782 /* lookup request again, list might have changed */
783 if (zfcp_reqlist_find_safe(adapter, req))
784 zfcp_reqlist_remove(adapter, req);
785 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
786 zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1", req);
787 return -EIO;
788 }
789
790 /* Don't increase for unsolicited status */
791 if (with_qtcb)
792 adapter->fsf_req_seq_no++;
793 adapter->req_no++;
794
795 return 0;
796 }
797
798 /**
799 * zfcp_fsf_status_read - send status read request
800  * @adapter: pointer to struct zfcp_adapter
801  *
802  * Returns: 0 on success, error otherwise
803 */
804 int zfcp_fsf_status_read(struct zfcp_adapter *adapter)
805 {
806 struct zfcp_fsf_req *req;
807 struct fsf_status_read_buffer *sr_buf;
808 struct qdio_buffer_element *sbale;
809 int retval = -EIO;
810
811 spin_lock_bh(&adapter->req_q_lock);
812 if (zfcp_fsf_req_sbal_get(adapter))
813 goto out;
814
815 req = zfcp_fsf_req_create(adapter, FSF_QTCB_UNSOLICITED_STATUS,
816 adapter->pool.status_read_req);
817 if (IS_ERR(req)) {
818 retval = PTR_ERR(req);
819 goto out;
820 }
821
822 sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req);
823 sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY;
824 req->queue_req.sbale_curr = 2;
825
826 sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC);
827 if (!sr_buf) {
828 retval = -ENOMEM;
829 goto failed_buf;
830 }
831 memset(sr_buf, 0, sizeof(*sr_buf));
832 req->data = sr_buf;
833 sbale = zfcp_qdio_sbale_curr(adapter, &req->queue_req);
834 sbale->addr = (void *) sr_buf;
835 sbale->length = sizeof(*sr_buf);
836
837 retval = zfcp_fsf_req_send(req);
838 if (retval)
839 goto failed_req_send;
840
841 goto out;
842
843 failed_req_send:
844 mempool_free(sr_buf, adapter->pool.status_read_data);
845 failed_buf:
846 zfcp_fsf_req_free(req);
847 zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL);
848 out:
849 spin_unlock_bh(&adapter->req_q_lock);
850 return retval;
851 }
852
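/* evaluate the adapter's response to an abort FCP command request */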
853 static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
854 {
855 struct zfcp_unit *unit = req->data;
856 union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
857
858 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
859 return;
860
861 switch (req->qtcb->header.fsf_status) {
862 case FSF_PORT_HANDLE_NOT_VALID:
863 if (fsq->word[0] == fsq->word[1]) {
864 zfcp_erp_adapter_reopen(unit->port->adapter, 0,
865 "fsafch1", req);
866 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
867 }
868 break;
869 case FSF_LUN_HANDLE_NOT_VALID:
870 if (fsq->word[0] == fsq->word[1]) {
871 zfcp_erp_port_reopen(unit->port, 0, "fsafch2", req);
872 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
873 }
874 break;
875 case FSF_FCP_COMMAND_DOES_NOT_EXIST:
876 req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
877 break;
878 case FSF_PORT_BOXED:
879 zfcp_erp_port_boxed(unit->port, "fsafch3", req);
880 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
881 ZFCP_STATUS_FSFREQ_RETRY;
882 break;
883 case FSF_LUN_BOXED:
884 zfcp_erp_unit_boxed(unit, "fsafch4", req);
885 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
886 ZFCP_STATUS_FSFREQ_RETRY;
887 break;
888 case FSF_ADAPTER_STATUS_AVAILABLE:
889 switch (fsq->word[0]) {
890 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
891 zfcp_test_link(unit->port);
892 /* fall through */
893 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
894 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
895 break;
896 }
897 break;
898 case FSF_GOOD:
899 req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
900 break;
901 }
902 }
903
904 /**
905 * zfcp_fsf_abort_fcp_command - abort running SCSI command
906  * @old_req_id: request id of the FCP command to be aborted
907 * @unit: pointer to struct zfcp_unit
908 * Returns: pointer to struct zfcp_fsf_req
909 */
910
911 struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
912 struct zfcp_unit *unit)
913 {
914 struct qdio_buffer_element *sbale;
915 struct zfcp_fsf_req *req = NULL;
916 struct zfcp_adapter *adapter = unit->port->adapter;
917
918 spin_lock_bh(&adapter->req_q_lock);
919 if (zfcp_fsf_req_sbal_get(adapter))
920 goto out;
921 req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND,
922 adapter->pool.scsi_abort);
923 if (IS_ERR(req)) {
924 req = NULL;
925 goto out;
926 }
927
928 if (unlikely(!(atomic_read(&unit->status) &
929 ZFCP_STATUS_COMMON_UNBLOCKED)))
930 goto out_error_free;
931
932 sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req);
933 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
934 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
935
936 req->data = unit;
937 req->handler = zfcp_fsf_abort_fcp_command_handler;
938 req->qtcb->header.lun_handle = unit->handle;
939 req->qtcb->header.port_handle = unit->port->handle;
940 req->qtcb->bottom.support.req_handle = (u64) old_req_id;
941
942 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
943 if (!zfcp_fsf_req_send(req))
944 goto out;
945
946 out_error_free:
947 zfcp_fsf_req_free(req);
948 req = NULL;
949 out:
950 spin_unlock_bh(&adapter->req_q_lock);
951 return req;
952 }
953
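/* evaluate the response to a Generic Services (CT) request and call the caller's handler */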
954 static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
955 {
956 struct zfcp_adapter *adapter = req->adapter;
957 struct zfcp_send_ct *send_ct = req->data;
958 struct fsf_qtcb_header *header = &req->qtcb->header;
959
960 send_ct->status = -EINVAL;
961
962 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
963 goto skip_fsfstatus;
964
965 switch (header->fsf_status) {
966 case FSF_GOOD:
967 zfcp_san_dbf_event_ct_response(req);
968 send_ct->status = 0;
969 break;
970 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
971 zfcp_fsf_class_not_supp(req);
972 break;
973 case FSF_ADAPTER_STATUS_AVAILABLE:
974 switch (header->fsf_status_qual.word[0]){
975 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
976 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
977 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
978 break;
979 }
980 break;
981 case FSF_ACCESS_DENIED:
982 break;
983 case FSF_PORT_BOXED:
984 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
985 ZFCP_STATUS_FSFREQ_RETRY;
986 break;
987 case FSF_PORT_HANDLE_NOT_VALID:
988 zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req);
989 /* fall through */
990 case FSF_GENERIC_COMMAND_REJECTED:
991 case FSF_PAYLOAD_SIZE_MISMATCH:
992 case FSF_REQUEST_SIZE_TOO_LARGE:
993 case FSF_RESPONSE_SIZE_TOO_LARGE:
994 case FSF_SBAL_MISMATCH:
995 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
996 break;
997 }
998
999 skip_fsfstatus:
1000 if (send_ct->handler)
1001 send_ct->handler(send_ct->handler_data);
1002 }
1003
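/* map single-SBAL CT/ELS request and response buffers into one unchained SBAL */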
1004 static void zfcp_fsf_setup_ct_els_unchained(struct qdio_buffer_element *sbale,
1005 struct scatterlist *sg_req,
1006 struct scatterlist *sg_resp)
1007 {
1008 sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ;
1009 sbale[2].addr = sg_virt(sg_req);
1010 sbale[2].length = sg_req->length;
1011 sbale[3].addr = sg_virt(sg_resp);
1012 sbale[3].length = sg_resp->length;
1013 sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
1014 }
1015
1016 static int zfcp_fsf_one_sbal(struct scatterlist *sg)
1017 {
1018 return sg_is_last(sg) && sg->length <= PAGE_SIZE;
1019 }
1020
1021 static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
1022 struct scatterlist *sg_req,
1023 struct scatterlist *sg_resp,
1024 int max_sbals)
1025 {
1026 struct zfcp_adapter *adapter = req->adapter;
1027 struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(adapter,
1028 &req->queue_req);
1029 u32 feat = adapter->adapter_features;
1030 int bytes;
1031
1032 if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) {
1033 if (!zfcp_fsf_one_sbal(sg_req) || !zfcp_fsf_one_sbal(sg_resp))
1034 return -EOPNOTSUPP;
1035
1036 zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp);
1037 return 0;
1038 }
1039
1040 /* use single, unchained SBAL if it can hold the request */
1041 if (zfcp_fsf_one_sbal(sg_req) && zfcp_fsf_one_sbal(sg_resp)) {
1042 zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp);
1043 return 0;
1044 }
1045
1046 bytes = zfcp_qdio_sbals_from_sg(adapter, &req->queue_req,
1047 SBAL_FLAGS0_TYPE_WRITE_READ,
1048 sg_req, max_sbals);
1049 if (bytes <= 0)
1050 return -EIO;
1051 req->qtcb->bottom.support.req_buf_length = bytes;
1052 req->queue_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
1053
1054 bytes = zfcp_qdio_sbals_from_sg(adapter, &req->queue_req,
1055 SBAL_FLAGS0_TYPE_WRITE_READ,
1056 sg_resp, max_sbals);
1057 if (bytes <= 0)
1058 return -EIO;
1059 req->qtcb->bottom.support.resp_buf_length = bytes;
1060
1061 return 0;
1062 }
1063
1064 /**
1065 * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
1066 * @ct: pointer to struct zfcp_send_ct with data for request
1067 * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
1068  * @erp_action: if non-null, the Generic Service request is sent within this ERP action
1069 */
1070 int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
1071 struct zfcp_erp_action *erp_action)
1072 {
1073 struct zfcp_wka_port *wka_port = ct->wka_port;
1074 struct zfcp_adapter *adapter = wka_port->adapter;
1075 struct zfcp_fsf_req *req;
1076 int ret = -EIO;
1077
1078 spin_lock_bh(&adapter->req_q_lock);
1079 if (zfcp_fsf_req_sbal_get(adapter))
1080 goto out;
1081
1082 req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_GENERIC, pool);
1083
1084 if (IS_ERR(req)) {
1085 ret = PTR_ERR(req);
1086 goto out;
1087 }
1088
1089 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1090 ret = zfcp_fsf_setup_ct_els_sbals(req, ct->req, ct->resp,
1091 FSF_MAX_SBALS_PER_REQ);
1092 if (ret)
1093 goto failed_send;
1094
1095 req->handler = zfcp_fsf_send_ct_handler;
1096 req->qtcb->header.port_handle = wka_port->handle;
1097 req->qtcb->bottom.support.service_class = FSF_CLASS_3;
1098 req->qtcb->bottom.support.timeout = ct->timeout;
1099 req->data = ct;
1100
1101 zfcp_san_dbf_event_ct_request(req);
1102
1103 if (erp_action) {
1104 erp_action->fsf_req = req;
1105 req->erp_action = erp_action;
1106 zfcp_fsf_start_erp_timer(req);
1107 } else
1108 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1109
1110 ret = zfcp_fsf_req_send(req);
1111 if (ret)
1112 goto failed_send;
1113
1114 goto out;
1115
1116 failed_send:
1117 zfcp_fsf_req_free(req);
1118 if (erp_action)
1119 erp_action->fsf_req = NULL;
1120 out:
1121 spin_unlock_bh(&adapter->req_q_lock);
1122 return ret;
1123 }
1124
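/* evaluate the response to an ELS request and call the caller's handler */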
1125 static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
1126 {
1127 struct zfcp_send_els *send_els = req->data;
1128 struct zfcp_port *port = send_els->port;
1129 struct fsf_qtcb_header *header = &req->qtcb->header;
1130
1131 send_els->status = -EINVAL;
1132
1133 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1134 goto skip_fsfstatus;
1135
1136 switch (header->fsf_status) {
1137 case FSF_GOOD:
1138 zfcp_san_dbf_event_els_response(req);
1139 send_els->status = 0;
1140 break;
1141 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
1142 zfcp_fsf_class_not_supp(req);
1143 break;
1144 case FSF_ADAPTER_STATUS_AVAILABLE:
1145 switch (header->fsf_status_qual.word[0]){
1146 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1147 if (port && (send_els->ls_code != ZFCP_LS_ADISC))
1148 zfcp_test_link(port);
1149 			/* fall through */
1150 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1151 case FSF_SQ_RETRY_IF_POSSIBLE:
1152 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1153 break;
1154 }
1155 break;
1156 case FSF_ELS_COMMAND_REJECTED:
1157 case FSF_PAYLOAD_SIZE_MISMATCH:
1158 case FSF_REQUEST_SIZE_TOO_LARGE:
1159 case FSF_RESPONSE_SIZE_TOO_LARGE:
1160 break;
1161 case FSF_ACCESS_DENIED:
1162 if (port)
1163 zfcp_fsf_access_denied_port(req, port);
1164 break;
1165 case FSF_SBAL_MISMATCH:
1166 		/* should never occur, avoided in zfcp_fsf_send_els */
1167 /* fall through */
1168 default:
1169 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1170 break;
1171 }
1172 skip_fsfstatus:
1173 if (send_els->handler)
1174 send_els->handler(send_els->handler_data);
1175 }
1176
1177 /**
1178 * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
1179 * @els: pointer to struct zfcp_send_els with data for the command
1180 */
1181 int zfcp_fsf_send_els(struct zfcp_send_els *els)
1182 {
1183 struct zfcp_fsf_req *req;
1184 struct zfcp_adapter *adapter = els->adapter;
1185 struct fsf_qtcb_bottom_support *bottom;
1186 int ret = -EIO;
1187
1188 spin_lock_bh(&adapter->req_q_lock);
1189 if (zfcp_fsf_req_sbal_get(adapter))
1190 goto out;
1191
1192 req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS, NULL);
1193
1194 if (IS_ERR(req)) {
1195 ret = PTR_ERR(req);
1196 goto out;
1197 }
1198
1199 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1200 ret = zfcp_fsf_setup_ct_els_sbals(req, els->req, els->resp, 2);
1201
1202 if (ret)
1203 goto failed_send;
1204
1205 bottom = &req->qtcb->bottom.support;
1206 req->handler = zfcp_fsf_send_els_handler;
1207 bottom->d_id = els->d_id;
1208 bottom->service_class = FSF_CLASS_3;
1209 bottom->timeout = 2 * R_A_TOV;
1210 req->data = els;
1211
1212 zfcp_san_dbf_event_els_request(req);
1213
1214 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1215 ret = zfcp_fsf_req_send(req);
1216 if (ret)
1217 goto failed_send;
1218
1219 goto out;
1220
1221 failed_send:
1222 zfcp_fsf_req_free(req);
1223 out:
1224 spin_unlock_bh(&adapter->req_q_lock);
1225 return ret;
1226 }
1227
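/**
 * zfcp_fsf_exchange_config_data - request configuration data from the FCP adapter
 * @erp_action: ERP action that triggers the exchange config data request
 * Returns: 0 on success, error otherwise
 */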
1228 int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1229 {
1230 struct qdio_buffer_element *sbale;
1231 struct zfcp_fsf_req *req;
1232 struct zfcp_adapter *adapter = erp_action->adapter;
1233 int retval = -EIO;
1234
1235 spin_lock_bh(&adapter->req_q_lock);
1236 if (zfcp_fsf_req_sbal_get(adapter))
1237 goto out;
1238
1239 req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1240 adapter->pool.erp_req);
1241
1242 if (IS_ERR(req)) {
1243 retval = PTR_ERR(req);
1244 goto out;
1245 }
1246
1247 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1248 sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req);
1249 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1250 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1251
1252 req->qtcb->bottom.config.feature_selection =
1253 FSF_FEATURE_CFDC |
1254 FSF_FEATURE_LUN_SHARING |
1255 FSF_FEATURE_NOTIFICATION_LOST |
1256 FSF_FEATURE_UPDATE_ALERT;
1257 req->erp_action = erp_action;
1258 req->handler = zfcp_fsf_exchange_config_data_handler;
1259 erp_action->fsf_req = req;
1260
1261 zfcp_fsf_start_erp_timer(req);
1262 retval = zfcp_fsf_req_send(req);
1263 if (retval) {
1264 zfcp_fsf_req_free(req);
1265 erp_action->fsf_req = NULL;
1266 }
1267 out:
1268 spin_unlock_bh(&adapter->req_q_lock);
1269 return retval;
1270 }
1271
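/**
 * zfcp_fsf_exchange_config_data_sync - exchange config data and wait for completion
 * @adapter: pointer to struct zfcp_adapter
 * @data: if non-NULL, the configuration data returned by the adapter is copied here
 * Returns: 0 on success, error otherwise
 */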
1272 int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter,
1273 struct fsf_qtcb_bottom_config *data)
1274 {
1275 struct qdio_buffer_element *sbale;
1276 struct zfcp_fsf_req *req = NULL;
1277 int retval = -EIO;
1278
1279 spin_lock_bh(&adapter->req_q_lock);
1280 if (zfcp_fsf_req_sbal_get(adapter))
1281 goto out_unlock;
1282
1283 req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA, NULL);
1284
1285 if (IS_ERR(req)) {
1286 retval = PTR_ERR(req);
1287 goto out_unlock;
1288 }
1289
1290 sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req);
1291 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1292 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1293 req->handler = zfcp_fsf_exchange_config_data_handler;
1294
1295 req->qtcb->bottom.config.feature_selection =
1296 FSF_FEATURE_CFDC |
1297 FSF_FEATURE_LUN_SHARING |
1298 FSF_FEATURE_NOTIFICATION_LOST |
1299 FSF_FEATURE_UPDATE_ALERT;
1300
1301 if (data)
1302 req->data = data;
1303
1304 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1305 retval = zfcp_fsf_req_send(req);
1306 spin_unlock_bh(&adapter->req_q_lock);
1307 if (!retval)
1308 wait_for_completion(&req->completion);
1309
1310 zfcp_fsf_req_free(req);
1311 return retval;
1312
1313 out_unlock:
1314 spin_unlock_bh(&adapter->req_q_lock);
1315 return retval;
1316 }
1317
1318 /**
1319 * zfcp_fsf_exchange_port_data - request information about local port
1320 * @erp_action: ERP action for the adapter for which port data is requested
1321 * Returns: 0 on success, error otherwise
1322 */
1323 int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1324 {
1325 struct qdio_buffer_element *sbale;
1326 struct zfcp_fsf_req *req;
1327 struct zfcp_adapter *adapter = erp_action->adapter;
1328 int retval = -EIO;
1329
1330 if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1331 return -EOPNOTSUPP;
1332
1333 spin_lock_bh(&adapter->req_q_lock);
1334 if (zfcp_fsf_req_sbal_get(adapter))
1335 goto out;
1336
1337 req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA,
1338 adapter->pool.erp_req);
1339
1340 if (IS_ERR(req)) {
1341 retval = PTR_ERR(req);
1342 goto out;
1343 }
1344
1345 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1346 sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req);
1347 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1348 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1349
1350 req->handler = zfcp_fsf_exchange_port_data_handler;
1351 req->erp_action = erp_action;
1352 erp_action->fsf_req = req;
1353
1354 zfcp_fsf_start_erp_timer(req);
1355 retval = zfcp_fsf_req_send(req);
1356 if (retval) {
1357 zfcp_fsf_req_free(req);
1358 erp_action->fsf_req = NULL;
1359 }
1360 out:
1361 spin_unlock_bh(&adapter->req_q_lock);
1362 return retval;
1363 }
1364
1365 /**
1366 * zfcp_fsf_exchange_port_data_sync - request information about local port
1367 * @adapter: pointer to struct zfcp_adapter
1368 * @data: pointer to struct fsf_qtcb_bottom_port
1369 * Returns: 0 on success, error otherwise
1370 */
1371 int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter,
1372 struct fsf_qtcb_bottom_port *data)
1373 {
1374 struct qdio_buffer_element *sbale;
1375 struct zfcp_fsf_req *req = NULL;
1376 int retval = -EIO;
1377
1378 if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1379 return -EOPNOTSUPP;
1380
1381 spin_lock_bh(&adapter->req_q_lock);
1382 if (zfcp_fsf_req_sbal_get(adapter))
1383 goto out_unlock;
1384
1385 req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, NULL);
1386
1387 if (IS_ERR(req)) {
1388 retval = PTR_ERR(req);
1389 goto out_unlock;
1390 }
1391
1392 if (data)
1393 req->data = data;
1394
1395 sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req);
1396 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1397 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1398
1399 req->handler = zfcp_fsf_exchange_port_data_handler;
1400 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1401 retval = zfcp_fsf_req_send(req);
1402 spin_unlock_bh(&adapter->req_q_lock);
1403
1404 if (!retval)
1405 wait_for_completion(&req->completion);
1406
1407 zfcp_fsf_req_free(req);
1408
1409 return retval;
1410
1411 out_unlock:
1412 spin_unlock_bh(&adapter->req_q_lock);
1413 return retval;
1414 }
1415
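/* evaluate the adapter's response to an open port request */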
1416 static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1417 {
1418 struct zfcp_port *port = req->data;
1419 struct fsf_qtcb_header *header = &req->qtcb->header;
1420 struct fsf_plogi *plogi;
1421
1422 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1423 goto out;
1424
1425 switch (header->fsf_status) {
1426 case FSF_PORT_ALREADY_OPEN:
1427 break;
1428 case FSF_ACCESS_DENIED:
1429 zfcp_fsf_access_denied_port(req, port);
1430 break;
1431 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1432 dev_warn(&req->adapter->ccw_device->dev,
1433 "Not enough FCP adapter resources to open "
1434 "remote port 0x%016Lx\n",
1435 (unsigned long long)port->wwpn);
1436 zfcp_erp_port_failed(port, "fsoph_1", req);
1437 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1438 break;
1439 case FSF_ADAPTER_STATUS_AVAILABLE:
1440 switch (header->fsf_status_qual.word[0]) {
1441 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1442 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1443 case FSF_SQ_NO_RETRY_POSSIBLE:
1444 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1445 break;
1446 }
1447 break;
1448 case FSF_GOOD:
1449 port->handle = header->port_handle;
1450 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN |
1451 ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1452 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1453 ZFCP_STATUS_COMMON_ACCESS_BOXED,
1454 &port->status);
1455 /* check whether D_ID has changed during open */
1456 /*
1457 * FIXME: This check is not airtight, as the FCP channel does
1458 * not monitor closures of target port connections caused on
1459 * the remote side. Thus, they might miss out on invalidating
1460 * locally cached WWPNs (and other N_Port parameters) of gone
1461 * target ports. So, our heroic attempt to make things safe
1462 * could be undermined by 'open port' response data tagged with
1463 * obsolete WWPNs. Another reason to monitor potential
1464 		 * connection closures ourselves at least (by interpreting
1465 * incoming ELS' and unsolicited status). It just crosses my
1466 * mind that one should be able to cross-check by means of
1467 * another GID_PN straight after a port has been opened.
1468 		 * Alternatively, an ADISC/PDISC ELS should suffice as well.
1469 */
1470 plogi = (struct fsf_plogi *) req->qtcb->bottom.support.els;
1471 if (req->qtcb->bottom.support.els1_length >=
1472 FSF_PLOGI_MIN_LEN) {
1473 if (plogi->serv_param.wwpn != port->wwpn)
1474 port->d_id = 0;
1475 else {
1476 port->wwnn = plogi->serv_param.wwnn;
1477 zfcp_fc_plogi_evaluate(port, plogi);
1478 }
1479 }
1480 break;
1481 case FSF_UNKNOWN_OP_SUBTYPE:
1482 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1483 break;
1484 }
1485
1486 out:
1487 zfcp_port_put(port);
1488 }
1489
1490 /**
1491 * zfcp_fsf_open_port - create and send open port request
1492 * @erp_action: pointer to struct zfcp_erp_action
1493 * Returns: 0 on success, error otherwise
1494 */
1495 int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1496 {
1497 struct qdio_buffer_element *sbale;
1498 struct zfcp_adapter *adapter = erp_action->adapter;
1499 struct zfcp_fsf_req *req;
1500 struct zfcp_port *port = erp_action->port;
1501 int retval = -EIO;
1502
1503 spin_lock_bh(&adapter->req_q_lock);
1504 if (zfcp_fsf_req_sbal_get(adapter))
1505 goto out;
1506
1507 req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_PORT_WITH_DID,
1508 adapter->pool.erp_req);
1509
1510 if (IS_ERR(req)) {
1511 retval = PTR_ERR(req);
1512 goto out;
1513 }
1514
1515 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1516 sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req);
1517 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1518 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1519
1520 req->handler = zfcp_fsf_open_port_handler;
1521 req->qtcb->bottom.support.d_id = port->d_id;
1522 req->data = port;
1523 req->erp_action = erp_action;
1524 erp_action->fsf_req = req;
1525 zfcp_port_get(port);
1526
1527 zfcp_fsf_start_erp_timer(req);
1528 retval = zfcp_fsf_req_send(req);
1529 if (retval) {
1530 zfcp_fsf_req_free(req);
1531 erp_action->fsf_req = NULL;
1532 zfcp_port_put(port);
1533 }
1534 out:
1535 spin_unlock_bh(&adapter->req_q_lock);
1536 return retval;
1537 }
1538
1539 static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
1540 {
1541 struct zfcp_port *port = req->data;
1542
1543 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1544 return;
1545
1546 switch (req->qtcb->header.fsf_status) {
1547 case FSF_PORT_HANDLE_NOT_VALID:
1548 zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1", req);
1549 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1550 break;
1551 case FSF_ADAPTER_STATUS_AVAILABLE:
1552 break;
1553 case FSF_GOOD:
1554 zfcp_erp_modify_port_status(port, "fscph_2", req,
1555 ZFCP_STATUS_COMMON_OPEN,
1556 ZFCP_CLEAR);
1557 break;
1558 }
1559 }
1560
1561 /**
1562 * zfcp_fsf_close_port - create and send close port request
1563 * @erp_action: pointer to struct zfcp_erp_action
1564 * Returns: 0 on success, error otherwise
1565 */
1566 int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1567 {
1568 struct qdio_buffer_element *sbale;
1569 struct zfcp_adapter *adapter = erp_action->adapter;
1570 struct zfcp_fsf_req *req;
1571 int retval = -EIO;
1572
1573 spin_lock_bh(&adapter->req_q_lock);
1574 if (zfcp_fsf_req_sbal_get(adapter))
1575 goto out;
1576
1577 req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT,
1578 adapter->pool.erp_req);
1579
1580 if (IS_ERR(req)) {
1581 retval = PTR_ERR(req);
1582 goto out;
1583 }
1584
1585 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1586 sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req);
1587 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1588 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1589
1590 req->handler = zfcp_fsf_close_port_handler;
1591 req->data = erp_action->port;
1592 req->erp_action = erp_action;
1593 req->qtcb->header.port_handle = erp_action->port->handle;
1594 erp_action->fsf_req = req;
1595
1596 zfcp_fsf_start_erp_timer(req);
1597 retval = zfcp_fsf_req_send(req);
1598 if (retval) {
1599 zfcp_fsf_req_free(req);
1600 erp_action->fsf_req = NULL;
1601 }
1602 out:
1603 spin_unlock_bh(&adapter->req_q_lock);
1604 return retval;
1605 }
1606
1607 static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
1608 {
1609 struct zfcp_wka_port *wka_port = req->data;
1610 struct fsf_qtcb_header *header = &req->qtcb->header;
1611
1612 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
1613 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
1614 goto out;
1615 }
1616
1617 switch (header->fsf_status) {
1618 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1619 dev_warn(&req->adapter->ccw_device->dev,
1620 "Opening WKA port 0x%x failed\n", wka_port->d_id);
1621 /* fall through */
1622 case FSF_ADAPTER_STATUS_AVAILABLE:
1623 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1624 /* fall through */
1625 case FSF_ACCESS_DENIED:
1626 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
1627 break;
1628 case FSF_GOOD:
1629 wka_port->handle = header->port_handle;
1630 /* fall through */
1631 case FSF_PORT_ALREADY_OPEN:
1632 wka_port->status = ZFCP_WKA_PORT_ONLINE;
1633 }
1634 out:
1635 wake_up(&wka_port->completion_wq);
1636 }
1637
1638 /**
1639 * zfcp_fsf_open_wka_port - create and send open wka-port request
1640 * @wka_port: pointer to struct zfcp_wka_port
1641 * Returns: 0 on success, error otherwise
1642 */
1643 int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port)
1644 {
1645 struct qdio_buffer_element *sbale;
1646 struct zfcp_adapter *adapter = wka_port->adapter;
1647 struct zfcp_fsf_req *req;
1648 int retval = -EIO;
1649
1650 spin_lock_bh(&adapter->req_q_lock);
1651 if (zfcp_fsf_req_sbal_get(adapter))
1652 goto out;
1653
1654 req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_PORT_WITH_DID,
1655 adapter->pool.erp_req);
1656
1657 if (unlikely(IS_ERR(req))) {
1658 retval = PTR_ERR(req);
1659 goto out;
1660 }
1661
1662 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1663 sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req);
1664 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1665 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1666
1667 req->handler = zfcp_fsf_open_wka_port_handler;
1668 req->qtcb->bottom.support.d_id = wka_port->d_id;
1669 req->data = wka_port;
1670
1671 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1672 retval = zfcp_fsf_req_send(req);
1673 if (retval)
1674 zfcp_fsf_req_free(req);
1675 out:
1676 spin_unlock_bh(&adapter->req_q_lock);
1677 return retval;
1678 }
1679
1680 static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
1681 {
1682 struct zfcp_wka_port *wka_port = req->data;
1683
1684 if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
1685 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1686 zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1", req);
1687 }
1688
1689 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
1690 wake_up(&wka_port->completion_wq);
1691 }
1692
1693 /**
1694 * zfcp_fsf_close_wka_port - create and send close wka port request
1695  * @wka_port: pointer to struct zfcp_wka_port
1696 * Returns: 0 on success, error otherwise
1697 */
1698 int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port)
1699 {
1700 struct qdio_buffer_element *sbale;
1701 struct zfcp_adapter *adapter = wka_port->adapter;
1702 struct zfcp_fsf_req *req;
1703 int retval = -EIO;
1704
1705 spin_lock_bh(&adapter->req_q_lock);
1706 if (zfcp_fsf_req_sbal_get(adapter))
1707 goto out;
1708
1709 req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT,
1710 adapter->pool.erp_req);
1711
1712 if (unlikely(IS_ERR(req))) {
1713 retval = PTR_ERR(req);
1714 goto out;
1715 }
1716
1717 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1718 sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req);
1719 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1720 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1721
1722 req->handler = zfcp_fsf_close_wka_port_handler;
1723 req->data = wka_port;
1724 req->qtcb->header.port_handle = wka_port->handle;
1725
1726 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1727 retval = zfcp_fsf_req_send(req);
1728 if (retval)
1729 zfcp_fsf_req_free(req);
1730 out:
1731 spin_unlock_bh(&adapter->req_q_lock);
1732 return retval;
1733 }
1734
1735 static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
1736 {
1737 struct zfcp_port *port = req->data;
1738 struct fsf_qtcb_header *header = &req->qtcb->header;
1739 struct zfcp_unit *unit;
1740
1741 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1742 return;
1743
1744 switch (header->fsf_status) {
1745 case FSF_PORT_HANDLE_NOT_VALID:
1746 zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1", req);
1747 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1748 break;
1749 case FSF_ACCESS_DENIED:
1750 zfcp_fsf_access_denied_port(req, port);
1751 break;
1752 case FSF_PORT_BOXED:
1753 /* can't use generic zfcp_erp_modify_port_status because
1754 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
1755 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1756 list_for_each_entry(unit, &port->unit_list_head, list)
1757 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
1758 &unit->status);
1759 zfcp_erp_port_boxed(port, "fscpph2", req);
1760 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
1761 ZFCP_STATUS_FSFREQ_RETRY;
1762
1763 break;
1764 case FSF_ADAPTER_STATUS_AVAILABLE:
1765 switch (header->fsf_status_qual.word[0]) {
1766 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1767 /* fall through */
1768 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1769 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1770 break;
1771 }
1772 break;
1773 case FSF_GOOD:
1774 /* can't use generic zfcp_erp_modify_port_status because
1775 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
1776 */
1777 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1778 list_for_each_entry(unit, &port->unit_list_head, list)
1779 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
1780 &unit->status);
1781 break;
1782 }
1783 }
1784
1785 /**
1786 * zfcp_fsf_close_physical_port - close physical port
1787 * @erp_action: pointer to struct zfcp_erp_action
1788 * Returns: 0 on success
1789 */
1790 int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1791 {
1792 struct qdio_buffer_element *sbale;
1793 struct zfcp_adapter *adapter = erp_action->adapter;
1794 struct zfcp_fsf_req *req;
1795 int retval = -EIO;
1796
1797 spin_lock_bh(&adapter->req_q_lock);
1798 if (zfcp_fsf_req_sbal_get(adapter))
1799 goto out;
1800
1801 req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PHYSICAL_PORT,
1802 adapter->pool.erp_req);
1803
1804 if (IS_ERR(req)) {
1805 retval = PTR_ERR(req);
1806 goto out;
1807 }
1808
1809 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1810 sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req);
1811 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1812 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1813
1814 req->data = erp_action->port;
1815 req->qtcb->header.port_handle = erp_action->port->handle;
1816 req->erp_action = erp_action;
1817 req->handler = zfcp_fsf_close_physical_port_handler;
1818 erp_action->fsf_req = req;
1819
1820 zfcp_fsf_start_erp_timer(req);
1821 retval = zfcp_fsf_req_send(req);
1822 if (retval) {
1823 zfcp_fsf_req_free(req);
1824 erp_action->fsf_req = NULL;
1825 }
1826 out:
1827 spin_unlock_bh(&adapter->req_q_lock);
1828 return retval;
1829 }
1830
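/*
 * Completion handler for an open LUN request: on FSF_GOOD the returned LUN
 * handle is stored and the unit is marked open; without NPIV, on adapters
 * that report LUN sharing, the exclusive/read-write access information is
 * evaluated and unsupported combinations shut the unit down again. All
 * error statuses trigger the corresponding recovery action.
 */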
1831 static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1832 {
1833 struct zfcp_adapter *adapter = req->adapter;
1834 struct zfcp_unit *unit = req->data;
1835 struct fsf_qtcb_header *header = &req->qtcb->header;
1836 struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
1837 struct fsf_queue_designator *queue_designator =
1838 &header->fsf_status_qual.fsf_queue_designator;
1839 int exclusive, readwrite;
1840
1841 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1842 return;
1843
1844 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1845 ZFCP_STATUS_COMMON_ACCESS_BOXED |
1846 ZFCP_STATUS_UNIT_SHARED |
1847 ZFCP_STATUS_UNIT_READONLY,
1848 &unit->status);
1849
1850 switch (header->fsf_status) {
1851
1852 case FSF_PORT_HANDLE_NOT_VALID:
1853 zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fsouh_1", req);
1854 /* fall through */
1855 case FSF_LUN_ALREADY_OPEN:
1856 break;
1857 case FSF_ACCESS_DENIED:
1858 zfcp_fsf_access_denied_unit(req, unit);
1859 atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status);
1860 atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
1861 break;
1862 case FSF_PORT_BOXED:
1863 zfcp_erp_port_boxed(unit->port, "fsouh_2", req);
1864 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
1865 ZFCP_STATUS_FSFREQ_RETRY;
1866 break;
1867 case FSF_LUN_SHARING_VIOLATION:
1868 if (header->fsf_status_qual.word[0])
1869 dev_warn(&adapter->ccw_device->dev,
1870 "LUN 0x%Lx on port 0x%Lx is already in "
1871 "use by CSS%d, MIF Image ID %x\n",
1872 (unsigned long long)unit->fcp_lun,
1873 (unsigned long long)unit->port->wwpn,
1874 queue_designator->cssid,
1875 queue_designator->hla);
1876 else
1877 zfcp_act_eval_err(adapter,
1878 header->fsf_status_qual.word[2]);
1879 zfcp_erp_unit_access_denied(unit, "fsouh_3", req);
1880 atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status);
1881 atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
1882 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1883 break;
1884 case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
1885 dev_warn(&adapter->ccw_device->dev,
1886 "No handle is available for LUN "
1887 "0x%016Lx on port 0x%016Lx\n",
1888 (unsigned long long)unit->fcp_lun,
1889 (unsigned long long)unit->port->wwpn);
1890 zfcp_erp_unit_failed(unit, "fsouh_4", req);
1891 /* fall through */
1892 case FSF_INVALID_COMMAND_OPTION:
1893 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1894 break;
1895 case FSF_ADAPTER_STATUS_AVAILABLE:
1896 switch (header->fsf_status_qual.word[0]) {
1897 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1898 zfcp_test_link(unit->port);
1899 /* fall through */
1900 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1901 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1902 break;
1903 }
1904 break;
1905
1906 case FSF_GOOD:
1907 unit->handle = header->lun_handle;
1908 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
1909
1910 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) &&
1911 (adapter->adapter_features & FSF_FEATURE_LUN_SHARING) &&
1912 !zfcp_ccw_priv_sch(adapter)) {
1913 exclusive = (bottom->lun_access_info &
1914 FSF_UNIT_ACCESS_EXCLUSIVE);
1915 readwrite = (bottom->lun_access_info &
1916 FSF_UNIT_ACCESS_OUTBOUND_TRANSFER);
1917
1918 if (!exclusive)
1919 atomic_set_mask(ZFCP_STATUS_UNIT_SHARED,
1920 &unit->status);
1921
1922 if (!readwrite) {
1923 atomic_set_mask(ZFCP_STATUS_UNIT_READONLY,
1924 &unit->status);
1925 dev_info(&adapter->ccw_device->dev,
1926 "SCSI device at LUN 0x%016Lx on port "
1927 "0x%016Lx opened read-only\n",
1928 (unsigned long long)unit->fcp_lun,
1929 (unsigned long long)unit->port->wwpn);
1930 }
1931
1932 if (exclusive && !readwrite) {
1933 dev_err(&adapter->ccw_device->dev,
1934 "Exclusive read-only access not "
1935 "supported (unit 0x%016Lx, "
1936 "port 0x%016Lx)\n",
1937 (unsigned long long)unit->fcp_lun,
1938 (unsigned long long)unit->port->wwpn);
1939 zfcp_erp_unit_failed(unit, "fsouh_5", req);
1940 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1941 zfcp_erp_unit_shutdown(unit, 0, "fsouh_6", req);
1942 } else if (!exclusive && readwrite) {
1943 dev_err(&adapter->ccw_device->dev,
1944 "Shared read-write access not "
1945 "supported (unit 0x%016Lx, port "
1946 "0x%016Lx)\n",
1947 (unsigned long long)unit->fcp_lun,
1948 (unsigned long long)unit->port->wwpn);
1949 zfcp_erp_unit_failed(unit, "fsouh_7", req);
1950 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1951 zfcp_erp_unit_shutdown(unit, 0, "fsouh_8", req);
1952 }
1953 }
1954 break;
1955 }
1956 }
1957
1958 /**
1959 * zfcp_fsf_open_unit - open unit
1960 * @erp_action: pointer to struct zfcp_erp_action
1961 * Returns: 0 on success, error otherwise
1962 */
1963 int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
1964 {
1965 struct qdio_buffer_element *sbale;
1966 struct zfcp_adapter *adapter = erp_action->adapter;
1967 struct zfcp_fsf_req *req;
1968 int retval = -EIO;
1969
1970 spin_lock_bh(&adapter->req_q_lock);
1971 if (zfcp_fsf_req_sbal_get(adapter))
1972 goto out;
1973
1974 req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_LUN,
1975 adapter->pool.erp_req);
1976
1977 if (IS_ERR(req)) {
1978 retval = PTR_ERR(req);
1979 goto out;
1980 }
1981
1982 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1983 sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req);
1984 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1985 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1986
1987 req->qtcb->header.port_handle = erp_action->port->handle;
1988 req->qtcb->bottom.support.fcp_lun = erp_action->unit->fcp_lun;
1989 req->handler = zfcp_fsf_open_unit_handler;
1990 req->data = erp_action->unit;
1991 req->erp_action = erp_action;
1992 erp_action->fsf_req = req;
1993
1994 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
1995 req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
1996
1997 zfcp_fsf_start_erp_timer(req);
1998 retval = zfcp_fsf_req_send(req);
1999 if (retval) {
2000 zfcp_fsf_req_free(req);
2001 erp_action->fsf_req = NULL;
2002 }
2003 out:
2004 spin_unlock_bh(&adapter->req_q_lock);
2005 return retval;
2006 }
2007
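/*
 * Completion handler for a close LUN request: clears the unit's open bit on
 * FSF_GOOD and starts adapter or port recovery for invalid handles.
 */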
2008 static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
2009 {
2010 struct zfcp_unit *unit = req->data;
2011
2012 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
2013 return;
2014
2015 switch (req->qtcb->header.fsf_status) {
2016 case FSF_PORT_HANDLE_NOT_VALID:
2017 zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fscuh_1", req);
2018 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2019 break;
2020 case FSF_LUN_HANDLE_NOT_VALID:
2021 zfcp_erp_port_reopen(unit->port, 0, "fscuh_2", req);
2022 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2023 break;
2024 case FSF_PORT_BOXED:
2025 zfcp_erp_port_boxed(unit->port, "fscuh_3", req);
2026 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
2027 ZFCP_STATUS_FSFREQ_RETRY;
2028 break;
2029 case FSF_ADAPTER_STATUS_AVAILABLE:
2030 switch (req->qtcb->header.fsf_status_qual.word[0]) {
2031 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
2032 zfcp_test_link(unit->port);
2033 /* fall through */
2034 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
2035 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2036 break;
2037 }
2038 break;
2039 case FSF_GOOD:
2040 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
2041 break;
2042 }
2043 }
2044
2045 /**
2046 * zfcp_fsf_close_unit - close zfcp unit
2047 * @erp_action: pointer to struct zfcp_erp_action
2048 * Returns: 0 on success, error otherwise
2049 */
2050 int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
2051 {
2052 struct qdio_buffer_element *sbale;
2053 struct zfcp_adapter *adapter = erp_action->adapter;
2054 struct zfcp_fsf_req *req;
2055 int retval = -EIO;
2056
2057 spin_lock_bh(&adapter->req_q_lock);
2058 if (zfcp_fsf_req_sbal_get(adapter))
2059 goto out;
2060
2061 req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_LUN,
2062 adapter->pool.erp_req);
2063
2064 if (IS_ERR(req)) {
2065 retval = PTR_ERR(req);
2066 goto out;
2067 }
2068
2069 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2070 sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req);
2071 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
2072 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2073
2074 req->qtcb->header.port_handle = erp_action->port->handle;
2075 req->qtcb->header.lun_handle = erp_action->unit->handle;
2076 req->handler = zfcp_fsf_close_unit_handler;
2077 req->data = erp_action->unit;
2078 req->erp_action = erp_action;
2079 erp_action->fsf_req = req;
2080
2081 zfcp_fsf_start_erp_timer(req);
2082 retval = zfcp_fsf_req_send(req);
2083 if (retval) {
2084 zfcp_fsf_req_free(req);
2085 erp_action->fsf_req = NULL;
2086 }
2087 out:
2088 spin_unlock_bh(&adapter->req_q_lock);
2089 return retval;
2090 }
2091
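/* fold one latency sample into sum, minimum and maximum of a record */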
2092 static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
2093 {
2094 lat_rec->sum += lat;
2095 lat_rec->min = min(lat_rec->min, lat);
2096 lat_rec->max = max(lat_rec->max, lat);
2097 }
2098
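/*
 * Account the channel and fabric latencies reported in the QTCB prefix to
 * the per-unit statistics matching the data direction of the request.
 */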
2099 static void zfcp_fsf_req_latency(struct zfcp_fsf_req *req)
2100 {
2101 struct fsf_qual_latency_info *lat_inf;
2102 struct latency_cont *lat;
2103 struct zfcp_unit *unit = req->unit;
2104
2105 lat_inf = &req->qtcb->prefix.prot_status_qual.latency_info;
2106
2107 switch (req->qtcb->bottom.io.data_direction) {
2108 case FSF_DATADIR_READ:
2109 lat = &unit->latencies.read;
2110 break;
2111 case FSF_DATADIR_WRITE:
2112 lat = &unit->latencies.write;
2113 break;
2114 case FSF_DATADIR_CMND:
2115 lat = &unit->latencies.cmd;
2116 break;
2117 default:
2118 return;
2119 }
2120
2121 spin_lock(&unit->latencies.lock);
2122 zfcp_fsf_update_lat(&lat->channel, lat_inf->channel_lat);
2123 zfcp_fsf_update_lat(&lat->fabric, lat_inf->fabric_lat);
2124 lat->counter++;
2125 spin_unlock(&unit->latencies.lock);
2126 }
2127
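/*
 * With blktrace support built in, attach zfcp latency and queue usage data
 * to the block layer trace of the completed SCSI request; otherwise this is
 * an empty stub.
 */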
2128 #ifdef CONFIG_BLK_DEV_IO_TRACE
2129 static void zfcp_fsf_trace_latency(struct zfcp_fsf_req *fsf_req)
2130 {
2131 struct fsf_qual_latency_info *lat_inf;
2132 struct scsi_cmnd *scsi_cmnd = (struct scsi_cmnd *)fsf_req->data;
2133 struct request *req = scsi_cmnd->request;
2134 struct zfcp_blk_drv_data trace;
2135 int ticks = fsf_req->adapter->timer_ticks;
2136
2137 trace.flags = 0;
2138 trace.magic = ZFCP_BLK_DRV_DATA_MAGIC;
2139 if (fsf_req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) {
2140 trace.flags |= ZFCP_BLK_LAT_VALID;
2141 lat_inf = &fsf_req->qtcb->prefix.prot_status_qual.latency_info;
2142 trace.channel_lat = lat_inf->channel_lat * ticks;
2143 trace.fabric_lat = lat_inf->fabric_lat * ticks;
2144 }
2145 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
2146 trace.flags |= ZFCP_BLK_REQ_ERROR;
2147 trace.inb_usage = fsf_req->queue_req.qdio_inb_usage;
2148 trace.outb_usage = fsf_req->queue_req.qdio_outb_usage;
2149
2150 blk_add_driver_data(req->q, req, &trace, sizeof(trace));
2151 }
2152 #else
2153 static inline void zfcp_fsf_trace_latency(struct zfcp_fsf_req *fsf_req)
2154 {
2155 }
2156 #endif
2157
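/*
 * Evaluate the FCP response of a completed SCSI command: set the SCSI result,
 * copy sense data and residual count, and call scsi_done while still holding
 * the adapter's abort_lock.
 */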
2158 static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
2159 {
2160 struct scsi_cmnd *scpnt;
2161 struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
2162 &(req->qtcb->bottom.io.fcp_rsp);
2163 u32 sns_len;
2164 char *fcp_rsp_info = (char *) &fcp_rsp_iu[1];
2165 unsigned long flags;
2166
2167 read_lock_irqsave(&req->adapter->abort_lock, flags);
2168
2169 scpnt = req->data;
2170 if (unlikely(!scpnt)) {
2171 read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2172 return;
2173 }
2174
2175 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ABORTED)) {
2176 set_host_byte(scpnt, DID_SOFT_ERROR);
2177 goto skip_fsfstatus;
2178 }
2179
2180 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
2181 set_host_byte(scpnt, DID_ERROR);
2182 goto skip_fsfstatus;
2183 }
2184
2185 set_msg_byte(scpnt, COMMAND_COMPLETE);
2186
2187 scpnt->result |= fcp_rsp_iu->scsi_status;
2188
2189 if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA)
2190 zfcp_fsf_req_latency(req);
2191
2192 zfcp_fsf_trace_latency(req);
2193
2194 if (unlikely(fcp_rsp_iu->validity.bits.fcp_rsp_len_valid)) {
2195 if (fcp_rsp_info[3] == RSP_CODE_GOOD)
2196 set_host_byte(scpnt, DID_OK);
2197 else {
2198 set_host_byte(scpnt, DID_ERROR);
2199 goto skip_fsfstatus;
2200 }
2201 }
2202
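/* copy sense data, limited by both the midlayer sense buffer size and the
 * sense length reported in the FCP response */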
2203 if (unlikely(fcp_rsp_iu->validity.bits.fcp_sns_len_valid)) {
2204 sns_len = FSF_FCP_RSP_SIZE - sizeof(struct fcp_rsp_iu) +
2205 fcp_rsp_iu->fcp_rsp_len;
2206 sns_len = min(sns_len, (u32) SCSI_SENSE_BUFFERSIZE);
2207 sns_len = min(sns_len, fcp_rsp_iu->fcp_sns_len);
2208
2209 memcpy(scpnt->sense_buffer,
2210 zfcp_get_fcp_sns_info_ptr(fcp_rsp_iu), sns_len);
2211 }
2212
2213 if (unlikely(fcp_rsp_iu->validity.bits.fcp_resid_under)) {
2214 scsi_set_resid(scpnt, fcp_rsp_iu->fcp_resid);
2215 if (scsi_bufflen(scpnt) - scsi_get_resid(scpnt) <
2216 scpnt->underflow)
2217 set_host_byte(scpnt, DID_ERROR);
2218 }
2219 skip_fsfstatus:
2220 if (scpnt->result != 0)
2221 zfcp_scsi_dbf_event_result("erro", 3, req->adapter, scpnt, req);
2222 else if (scpnt->retries > 0)
2223 zfcp_scsi_dbf_event_result("retr", 4, req->adapter, scpnt, req);
2224 else
2225 zfcp_scsi_dbf_event_result("norm", 6, req->adapter, scpnt, req);
2226
2227 scpnt->host_scribble = NULL;
2228 (scpnt->scsi_done) (scpnt);
2229 /*
2230 * We must hold this lock until scsi_done has been called.
2231 * Otherwise we may call scsi_done after the abort for this
2232 * command has completed.
2233 * Note: scsi_done must not block!
2234 */
2235 read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2236 }
2237
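/* evaluate the FCP response of a task management function */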
2238 static void zfcp_fsf_send_fcp_ctm_handler(struct zfcp_fsf_req *req)
2239 {
2240 struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
2241 &(req->qtcb->bottom.io.fcp_rsp);
2242 char *fcp_rsp_info = (char *) &fcp_rsp_iu[1];
2243
2244 if ((fcp_rsp_info[3] != RSP_CODE_GOOD) ||
2245 (req->status & ZFCP_STATUS_FSFREQ_ERROR))
2246 req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
2247 }
2248
2249
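/*
 * Common completion handler for FCP command requests: maps FSF error
 * statuses to recovery actions, then branches into the task management or
 * SCSI command specific evaluation and drops the unit reference for the
 * latter.
 */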
2250 static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req)
2251 {
2252 struct zfcp_unit *unit;
2253 struct fsf_qtcb_header *header = &req->qtcb->header;
2254
2255 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT))
2256 unit = req->data;
2257 else
2258 unit = req->unit;
2259
2260 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
2261 goto skip_fsfstatus;
2262
2263 switch (header->fsf_status) {
2264 case FSF_HANDLE_MISMATCH:
2265 case FSF_PORT_HANDLE_NOT_VALID:
2266 zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fssfch1", req);
2267 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2268 break;
2269 case FSF_FCPLUN_NOT_VALID:
2270 case FSF_LUN_HANDLE_NOT_VALID:
2271 zfcp_erp_port_reopen(unit->port, 0, "fssfch2", req);
2272 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2273 break;
2274 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
2275 zfcp_fsf_class_not_supp(req);
2276 break;
2277 case FSF_ACCESS_DENIED:
2278 zfcp_fsf_access_denied_unit(req, unit);
2279 break;
2280 case FSF_DIRECTION_INDICATOR_NOT_VALID:
2281 dev_err(&req->adapter->ccw_device->dev,
2282 "Incorrect direction %d, unit 0x%016Lx on port "
2283 "0x%016Lx closed\n",
2284 req->qtcb->bottom.io.data_direction,
2285 (unsigned long long)unit->fcp_lun,
2286 (unsigned long long)unit->port->wwpn);
2287 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, "fssfch3",
2288 req);
2289 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2290 break;
2291 case FSF_CMND_LENGTH_NOT_VALID:
2292 dev_err(&req->adapter->ccw_device->dev,
2293 "Incorrect CDB length %d, unit 0x%016Lx on "
2294 "port 0x%016Lx closed\n",
2295 req->qtcb->bottom.io.fcp_cmnd_length,
2296 (unsigned long long)unit->fcp_lun,
2297 (unsigned long long)unit->port->wwpn);
2298 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, "fssfch4",
2299 req);
2300 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2301 break;
2302 case FSF_PORT_BOXED:
2303 zfcp_erp_port_boxed(unit->port, "fssfch5", req);
2304 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
2305 ZFCP_STATUS_FSFREQ_RETRY;
2306 break;
2307 case FSF_LUN_BOXED:
2308 zfcp_erp_unit_boxed(unit, "fssfch6", req);
2309 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
2310 ZFCP_STATUS_FSFREQ_RETRY;
2311 break;
2312 case FSF_ADAPTER_STATUS_AVAILABLE:
2313 if (header->fsf_status_qual.word[0] ==
2314 FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
2315 zfcp_test_link(unit->port);
2316 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2317 break;
2318 }
2319 skip_fsfstatus:
2320 if (req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
2321 zfcp_fsf_send_fcp_ctm_handler(req);
2322 else {
2323 zfcp_fsf_send_fcp_command_task_handler(req);
2324 req->unit = NULL;
2325 zfcp_unit_put(unit);
2326 }
2327 }
2328
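/* store the FCP_DL field, which follows the variable-length additional CDB */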
2329 static void zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, u32 fcp_dl)
2330 {
2331 u32 *fcp_dl_ptr;
2332
2333 /*
2334 * fcp_dl_addr = start address of fcp_cmnd structure +
2335 * size of fixed part + size of dynamically sized add_fcp_cdb field,
2336 * see FCP-2 documentation
2337 */
2338 fcp_dl_ptr = (u32 *) ((unsigned char *) &fcp_cmd[1] +
2339 (fcp_cmd->add_fcp_cdb_length << 2));
2340 *fcp_dl_ptr = fcp_dl;
2341 }
2342
2343 /**
2344 * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command)
2345 * @unit: unit where command is sent to
2346 * @scsi_cmnd: scsi command to be sent
 * Returns: 0 on success, error otherwise
2347 */
2348 int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2349 struct scsi_cmnd *scsi_cmnd)
2350 {
2351 struct zfcp_fsf_req *req;
2352 struct fcp_cmnd_iu *fcp_cmnd_iu;
2353 unsigned int sbtype = SBAL_FLAGS0_TYPE_READ;
2354 int real_bytes, retval = -EIO;
2355 struct zfcp_adapter *adapter = unit->port->adapter;
2356
2357 if (unlikely(!(atomic_read(&unit->status) &
2358 ZFCP_STATUS_COMMON_UNBLOCKED)))
2359 return -EBUSY;
2360
2361 spin_lock(&adapter->req_q_lock);
2362 if (atomic_read(&adapter->req_q.count) <= 0) {
2363 atomic_inc(&adapter->qdio_outb_full);
2364 goto out;
2365 }
2366
2367 req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND,
2368 adapter->pool.scsi_req);
2369
2370 if (IS_ERR(req)) {
2371 retval = PTR_ERR(req);
2372 goto out;
2373 }
2374
2375 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2376 zfcp_unit_get(unit);
2377 req->unit = unit;
2378 req->data = scsi_cmnd;
2379 req->handler = zfcp_fsf_send_fcp_command_handler;
2380 req->qtcb->header.lun_handle = unit->handle;
2381 req->qtcb->header.port_handle = unit->port->handle;
2382 req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2383
2384 scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
2385
2386 fcp_cmnd_iu = (struct fcp_cmnd_iu *) &(req->qtcb->bottom.io.fcp_cmnd);
2387 fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
2388 /*
2389 * set depending on data direction:
2390 * data direction bits in SBALE (SB Type)
2391 * data direction bits in QTCB
2392 * data direction bits in FCP_CMND IU
2393 */
2394 switch (scsi_cmnd->sc_data_direction) {
2395 case DMA_NONE:
2396 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2397 break;
2398 case DMA_FROM_DEVICE:
2399 req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ;
2400 fcp_cmnd_iu->rddata = 1;
2401 break;
2402 case DMA_TO_DEVICE:
2403 req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE;
2404 sbtype = SBAL_FLAGS0_TYPE_WRITE;
2405 fcp_cmnd_iu->wddata = 1;
2406 break;
2407 case DMA_BIDIRECTIONAL:
2408 goto failed_scsi_cmnd;
2409 }
2410
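/* queue with a simple tag if the device supports tagging or the unit is
 * opened shared and read-only, otherwise send the command untagged */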
2411 if (likely((scsi_cmnd->device->simple_tags) ||
2412 ((atomic_read(&unit->status) & ZFCP_STATUS_UNIT_READONLY) &&
2413 (atomic_read(&unit->status) & ZFCP_STATUS_UNIT_SHARED))))
2414 fcp_cmnd_iu->task_attribute = SIMPLE_Q;
2415 else
2416 fcp_cmnd_iu->task_attribute = UNTAGGED;
2417
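/* CDBs longer than the fixed part of the FCP_CMND IU go into the additional
 * CDB field, whose length is counted in 4-byte words */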
2418 if (unlikely(scsi_cmnd->cmd_len > FCP_CDB_LENGTH))
2419 fcp_cmnd_iu->add_fcp_cdb_length =
2420 (scsi_cmnd->cmd_len - FCP_CDB_LENGTH) >> 2;
2421
2422 memcpy(fcp_cmnd_iu->fcp_cdb, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
2423
2424 req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) +
2425 fcp_cmnd_iu->add_fcp_cdb_length + sizeof(u32);
2426
2427 real_bytes = zfcp_qdio_sbals_from_sg(adapter, &req->queue_req, sbtype,
2428 scsi_sglist(scsi_cmnd),
2429 FSF_MAX_SBALS_PER_REQ);
2430 if (unlikely(real_bytes < 0)) {
2431 if (req->queue_req.sbal_number >= FSF_MAX_SBALS_PER_REQ) {
2432 dev_err(&adapter->ccw_device->dev,
2433 "Oversize data package, unit 0x%016Lx "
2434 "on port 0x%016Lx closed\n",
2435 (unsigned long long)unit->fcp_lun,
2436 (unsigned long long)unit->port->wwpn);
2437 zfcp_erp_unit_shutdown(unit, 0, "fssfct1", req);
2438 retval = -EINVAL;
2439 }
2440 goto failed_scsi_cmnd;
2441 }
2442
2443 zfcp_set_fcp_dl(fcp_cmnd_iu, real_bytes);
2444
2445 retval = zfcp_fsf_req_send(req);
2446 if (unlikely(retval))
2447 goto failed_scsi_cmnd;
2448
2449 goto out;
2450
2451 failed_scsi_cmnd:
2452 zfcp_unit_put(unit);
2453 zfcp_fsf_req_free(req);
2454 scsi_cmnd->host_scribble = NULL;
2455 out:
2456 spin_unlock(&adapter->req_q_lock);
2457 return retval;
2458 }
2459
2460 /**
2461 * zfcp_fsf_send_fcp_ctm - send SCSI task management command
2462 * @unit: pointer to struct zfcp_unit
2463 * @tm_flags: unsigned byte for task management flags
2464 * Returns: on success pointer to struct fsf_req, NULL otherwise
2465 */
2466 struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
2467 {
2468 struct qdio_buffer_element *sbale;
2469 struct zfcp_fsf_req *req = NULL;
2470 struct fcp_cmnd_iu *fcp_cmnd_iu;
2471 struct zfcp_adapter *adapter = unit->port->adapter;
2472
2473 if (unlikely(!(atomic_read(&unit->status) &
2474 ZFCP_STATUS_COMMON_UNBLOCKED)))
2475 return NULL;
2476
2477 spin_lock_bh(&adapter->req_q_lock);
2478 if (zfcp_fsf_req_sbal_get(adapter))
2479 goto out;
2480
2481 req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND,
2482 adapter->pool.scsi_req);
2483
2484 if (IS_ERR(req)) {
2485 req = NULL;
2486 goto out;
2487 }
2488
2489 req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT;
2490 req->data = unit;
2491 req->handler = zfcp_fsf_send_fcp_command_handler;
2492 req->qtcb->header.lun_handle = unit->handle;
2493 req->qtcb->header.port_handle = unit->port->handle;
2494 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2495 req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2496 req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) +
2497 sizeof(u32);
2498
2499 sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req);
2500 sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
2501 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2502
2503 fcp_cmnd_iu = (struct fcp_cmnd_iu *) &req->qtcb->bottom.io.fcp_cmnd;
2504 fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
2505 fcp_cmnd_iu->task_management_flags = tm_flags;
2506
2507 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
2508 if (!zfcp_fsf_req_send(req))
2509 goto out;
2510
2511 zfcp_fsf_req_free(req);
2512 req = NULL;
2513 out:
2514 spin_unlock_bh(&adapter->req_q_lock);
2515 return req;
2516 }
2517
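/* intentionally empty: the CFDC caller waits for request completion itself */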
2518 static void zfcp_fsf_control_file_handler(struct zfcp_fsf_req *req)
2519 {
2520 }
2521
2522 /**
2523 * zfcp_fsf_control_file - control file upload/download
2524 * @adapter: pointer to struct zfcp_adapter
2525 * @fsf_cfdc: pointer to struct zfcp_fsf_cfdc
2526 * Returns: on success pointer to struct zfcp_fsf_req, NULL otherwise
2527 */
2528 struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2529 struct zfcp_fsf_cfdc *fsf_cfdc)
2530 {
2531 struct qdio_buffer_element *sbale;
2532 struct zfcp_fsf_req *req = NULL;
2533 struct fsf_qtcb_bottom_support *bottom;
2534 int direction, retval = -EIO, bytes;
2535
2536 if (!(adapter->adapter_features & FSF_FEATURE_CFDC))
2537 return ERR_PTR(-EOPNOTSUPP);
2538
2539 switch (fsf_cfdc->command) {
2540 case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
2541 direction = SBAL_FLAGS0_TYPE_WRITE;
2542 break;
2543 case FSF_QTCB_UPLOAD_CONTROL_FILE:
2544 direction = SBAL_FLAGS0_TYPE_READ;
2545 break;
2546 default:
2547 return ERR_PTR(-EINVAL);
2548 }
2549
2550 spin_lock_bh(&adapter->req_q_lock);
2551 if (zfcp_fsf_req_sbal_get(adapter))
2552 goto out;
2553
2554 req = zfcp_fsf_req_create(adapter, fsf_cfdc->command, NULL);
2555 if (IS_ERR(req)) {
2556 retval = -EPERM;
2557 goto out;
2558 }
2559
2560 req->handler = zfcp_fsf_control_file_handler;
2561
2562 sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req);
2563 sbale[0].flags |= direction;
2564
2565 bottom = &req->qtcb->bottom.support;
2566 bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
2567 bottom->option = fsf_cfdc->option;
2568
2569 bytes = zfcp_qdio_sbals_from_sg(adapter, &req->queue_req, direction,
2570 fsf_cfdc->sg, FSF_MAX_SBALS_PER_REQ);
2571 if (bytes != ZFCP_CFDC_MAX_SIZE) {
2572 zfcp_fsf_req_free(req);
2573 goto out;
2574 }
2575
2576 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
2577 retval = zfcp_fsf_req_send(req);
2578 out:
2579 spin_unlock_bh(&adapter->req_q_lock);
2580
2581 if (!retval) {
2582 wait_for_completion(&req->completion);
2583 return req;
2584 }
2585 return ERR_PTR(retval);
2586 }
2587
2588 /**
2589 * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO
2590 * @adapter: pointer to struct zfcp_adapter
2591 * @sbal_idx: response queue index of SBAL to be processed
2592 */
2593 void zfcp_fsf_reqid_check(struct zfcp_adapter *adapter, int sbal_idx)
2594 {
2595 struct qdio_buffer *sbal = adapter->resp_q.sbal[sbal_idx];
2596 struct qdio_buffer_element *sbale;
2597 struct zfcp_fsf_req *fsf_req;
2598 unsigned long flags, req_id;
2599 int idx;
2600
2601 for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
2602
2603 sbale = &sbal->element[idx];
2604 req_id = (unsigned long) sbale->addr;
2605 spin_lock_irqsave(&adapter->req_list_lock, flags);
2606 fsf_req = zfcp_reqlist_find(adapter, req_id);
2607
2608 if (!fsf_req)
2609 /*
2610 * Unknown request means that we have potential memory
2611 * corruption and must stop the machine immediately.
2612 */
2613 panic("error: unknown req_id (%lx) on adapter %s.\n",
2614 req_id, dev_name(&adapter->ccw_device->dev));
2615
2616 list_del(&fsf_req->list);
2617 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
2618
2619 fsf_req->queue_req.sbal_response = sbal_idx;
2620 fsf_req->queue_req.qdio_inb_usage =
2621 atomic_read(&adapter->resp_q.count);
2622 zfcp_fsf_req_complete(fsf_req);
2623
2624 if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY))
2625 break;
2626 }
2627 }