[SCSI] zfcp: Apply common naming conventions to zfcp_fc
drivers/s390/scsi/zfcp_fsf.c
1 /*
2 * zfcp device driver
3 *
4 * Implementation of FSF commands.
5 *
6 * Copyright IBM Corporation 2002, 2009
7 */
8
9 #define KMSG_COMPONENT "zfcp"
10 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11
12 #include <linux/blktrace_api.h>
13 #include "zfcp_ext.h"
14 #include "zfcp_dbf.h"
15
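/* timer callback for FSF request timeouts: trigger adapter recovery */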
16 static void zfcp_fsf_request_timeout_handler(unsigned long data)
17 {
18 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
19 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
20 "fsrth_1", NULL);
21 }
22
23 static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
24 unsigned long timeout)
25 {
26 fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
27 fsf_req->timer.data = (unsigned long) fsf_req->adapter;
28 fsf_req->timer.expires = jiffies + timeout;
29 add_timer(&fsf_req->timer);
30 }
31
32 static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
33 {
34 BUG_ON(!fsf_req->erp_action);
35 fsf_req->timer.function = zfcp_erp_timeout_handler;
36 fsf_req->timer.data = (unsigned long) fsf_req->erp_action;
37 fsf_req->timer.expires = jiffies + 30 * HZ;
38 add_timer(&fsf_req->timer);
39 }
40
41 /* association between FSF command and FSF QTCB type */
42 static u32 fsf_qtcb_type[] = {
43 [FSF_QTCB_FCP_CMND] = FSF_IO_COMMAND,
44 [FSF_QTCB_ABORT_FCP_CMND] = FSF_SUPPORT_COMMAND,
45 [FSF_QTCB_OPEN_PORT_WITH_DID] = FSF_SUPPORT_COMMAND,
46 [FSF_QTCB_OPEN_LUN] = FSF_SUPPORT_COMMAND,
47 [FSF_QTCB_CLOSE_LUN] = FSF_SUPPORT_COMMAND,
48 [FSF_QTCB_CLOSE_PORT] = FSF_SUPPORT_COMMAND,
49 [FSF_QTCB_CLOSE_PHYSICAL_PORT] = FSF_SUPPORT_COMMAND,
50 [FSF_QTCB_SEND_ELS] = FSF_SUPPORT_COMMAND,
51 [FSF_QTCB_SEND_GENERIC] = FSF_SUPPORT_COMMAND,
52 [FSF_QTCB_EXCHANGE_CONFIG_DATA] = FSF_CONFIG_COMMAND,
53 [FSF_QTCB_EXCHANGE_PORT_DATA] = FSF_PORT_COMMAND,
54 [FSF_QTCB_DOWNLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND,
55 [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND
56 };
57
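/* decode an access control table (ACT) status qualifier and report the rule that denied access */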
58 static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table)
59 {
60 u16 subtable = table >> 16;
61 u16 rule = table & 0xffff;
62 const char *act_type[] = { "unknown", "OS", "WWPN", "DID", "LUN" };
63
64 if (subtable && subtable < ARRAY_SIZE(act_type))
65 dev_warn(&adapter->ccw_device->dev,
66 "Access denied according to ACT rule type %s, "
67 "rule %d\n", act_type[subtable], rule);
68 }
69
70 static void zfcp_fsf_access_denied_port(struct zfcp_fsf_req *req,
71 struct zfcp_port *port)
72 {
73 struct fsf_qtcb_header *header = &req->qtcb->header;
74 dev_warn(&req->adapter->ccw_device->dev,
75 "Access denied to port 0x%016Lx\n",
76 (unsigned long long)port->wwpn);
77 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
78 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
79 zfcp_erp_port_access_denied(port, "fspad_1", req);
80 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
81 }
82
83 static void zfcp_fsf_access_denied_unit(struct zfcp_fsf_req *req,
84 struct zfcp_unit *unit)
85 {
86 struct fsf_qtcb_header *header = &req->qtcb->header;
87 dev_warn(&req->adapter->ccw_device->dev,
88 "Access denied to unit 0x%016Lx on port 0x%016Lx\n",
89 (unsigned long long)unit->fcp_lun,
90 (unsigned long long)unit->port->wwpn);
91 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
92 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
93 zfcp_erp_unit_access_denied(unit, "fsuad_1", req);
94 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
95 }
96
97 static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
98 {
99 dev_err(&req->adapter->ccw_device->dev, "FCP device not "
100 "operational because of an unsupported FC class\n");
101 zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1", req);
102 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
103 }
104
105 /**
106 * zfcp_fsf_req_free - free memory used by fsf request
107 * @req: pointer to struct zfcp_fsf_req
108 */
109 void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
110 {
111 if (likely(req->pool)) {
112 if (likely(req->qtcb))
113 mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
114 mempool_free(req, req->pool);
115 return;
116 }
117
118 if (likely(req->qtcb))
119 kmem_cache_free(zfcp_data.qtcb_cache, req->qtcb);
120 kfree(req);
121 }
122
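/* unsolicited "port closed" status: find the port by destination ID and reopen it */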
123 static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
124 {
125 struct fsf_status_read_buffer *sr_buf = req->data;
126 struct zfcp_adapter *adapter = req->adapter;
127 struct zfcp_port *port;
128 int d_id = sr_buf->d_id & ZFCP_DID_MASK;
129 unsigned long flags;
130
131 read_lock_irqsave(&zfcp_data.config_lock, flags);
132 list_for_each_entry(port, &adapter->port_list_head, list)
133 if (port->d_id == d_id) {
134 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
135 zfcp_erp_port_reopen(port, 0, "fssrpc1", req);
136 return;
137 }
138 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
139 }
140
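/*
 * Evaluate link down information: block the remote ports, report the
 * cause of the link failure and mark the adapter as failed for ERP.
 */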
141 static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, char *id,
142 struct fsf_link_down_info *link_down)
143 {
144 struct zfcp_adapter *adapter = req->adapter;
145 unsigned long flags;
146
147 if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
148 return;
149
150 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
151
152 read_lock_irqsave(&zfcp_data.config_lock, flags);
153 zfcp_scsi_schedule_rports_block(adapter);
154 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
155
156 if (!link_down)
157 goto out;
158
159 switch (link_down->error_code) {
160 case FSF_PSQ_LINK_NO_LIGHT:
161 dev_warn(&req->adapter->ccw_device->dev,
162 "There is no light signal from the local "
163 "fibre channel cable\n");
164 break;
165 case FSF_PSQ_LINK_WRAP_PLUG:
166 dev_warn(&req->adapter->ccw_device->dev,
167 "There is a wrap plug instead of a fibre "
168 "channel cable\n");
169 break;
170 case FSF_PSQ_LINK_NO_FCP:
171 dev_warn(&req->adapter->ccw_device->dev,
172 "The adjacent fibre channel node does not "
173 "support FCP\n");
174 break;
175 case FSF_PSQ_LINK_FIRMWARE_UPDATE:
176 dev_warn(&req->adapter->ccw_device->dev,
177 "The FCP device is suspended because of a "
178 "firmware update\n");
179 break;
180 case FSF_PSQ_LINK_INVALID_WWPN:
181 dev_warn(&req->adapter->ccw_device->dev,
182 "The FCP device detected a WWPN that is "
183 "duplicate or not valid\n");
184 break;
185 case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
186 dev_warn(&req->adapter->ccw_device->dev,
187 "The fibre channel fabric does not support NPIV\n");
188 break;
189 case FSF_PSQ_LINK_NO_FCP_RESOURCES:
190 dev_warn(&req->adapter->ccw_device->dev,
191 "The FCP adapter cannot support more NPIV ports\n");
192 break;
193 case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
194 dev_warn(&req->adapter->ccw_device->dev,
195 "The adjacent switch cannot support "
196 "more NPIV ports\n");
197 break;
198 case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
199 dev_warn(&req->adapter->ccw_device->dev,
200 "The FCP adapter could not log in to the "
201 "fibre channel fabric\n");
202 break;
203 case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
204 dev_warn(&req->adapter->ccw_device->dev,
205 "The WWPN assignment file on the FCP adapter "
206 "has been damaged\n");
207 break;
208 case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
209 dev_warn(&req->adapter->ccw_device->dev,
210 "The mode table on the FCP adapter "
211 "has been damaged\n");
212 break;
213 case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
214 dev_warn(&req->adapter->ccw_device->dev,
215 "All NPIV ports on the FCP adapter have "
216 "been assigned\n");
217 break;
218 default:
219 dev_warn(&req->adapter->ccw_device->dev,
220 "The link between the FCP adapter and "
221 "the FC fabric is down\n");
222 }
223 out:
224 zfcp_erp_adapter_failed(adapter, id, req);
225 }
226
227 static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
228 {
229 struct fsf_status_read_buffer *sr_buf = req->data;
230 struct fsf_link_down_info *ldi =
231 (struct fsf_link_down_info *) &sr_buf->payload;
232
233 switch (sr_buf->status_subtype) {
234 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
235 zfcp_fsf_link_down_info_eval(req, "fssrld1", ldi);
236 break;
237 case FSF_STATUS_READ_SUB_FDISC_FAILED:
238 zfcp_fsf_link_down_info_eval(req, "fssrld2", ldi);
239 break;
240 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
241 zfcp_fsf_link_down_info_eval(req, "fssrld3", NULL);
242 }
243 }
244
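/* dispatch an unsolicited status notification and schedule a replacement status read via stat_work */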
245 static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
246 {
247 struct zfcp_adapter *adapter = req->adapter;
248 struct fsf_status_read_buffer *sr_buf = req->data;
249
250 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
251 zfcp_dbf_hba_fsf_unsol("dism", adapter->dbf, sr_buf);
252 mempool_free(sr_buf, adapter->pool.status_read_data);
253 zfcp_fsf_req_free(req);
254 return;
255 }
256
257 zfcp_dbf_hba_fsf_unsol("read", adapter->dbf, sr_buf);
258
259 switch (sr_buf->status_type) {
260 case FSF_STATUS_READ_PORT_CLOSED:
261 zfcp_fsf_status_read_port_closed(req);
262 break;
263 case FSF_STATUS_READ_INCOMING_ELS:
264 zfcp_fc_incoming_els(req);
265 break;
266 case FSF_STATUS_READ_SENSE_DATA_AVAIL:
267 break;
268 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
269 dev_warn(&adapter->ccw_device->dev,
270 "The error threshold for checksum statistics "
271 "has been exceeded\n");
272 zfcp_dbf_hba_berr(adapter->dbf, req);
273 break;
274 case FSF_STATUS_READ_LINK_DOWN:
275 zfcp_fsf_status_read_link_down(req);
276 break;
277 case FSF_STATUS_READ_LINK_UP:
278 dev_info(&adapter->ccw_device->dev,
279 "The local link has been restored\n");
280 /* All ports should be marked as ready to run again */
281 zfcp_erp_modify_adapter_status(adapter, "fssrh_1", NULL,
282 ZFCP_STATUS_COMMON_RUNNING,
283 ZFCP_SET);
284 zfcp_erp_adapter_reopen(adapter,
285 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
286 ZFCP_STATUS_COMMON_ERP_FAILED,
287 "fssrh_2", req);
288 break;
289 case FSF_STATUS_READ_NOTIFICATION_LOST:
290 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED)
291 zfcp_erp_adapter_access_changed(adapter, "fssrh_3",
292 req);
293 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
294 schedule_work(&adapter->scan_work);
295 break;
296 case FSF_STATUS_READ_CFDC_UPDATED:
297 zfcp_erp_adapter_access_changed(adapter, "fssrh_4", req);
298 break;
299 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
300 adapter->adapter_features = sr_buf->payload.word[0];
301 break;
302 }
303
304 mempool_free(sr_buf, adapter->pool.status_read_data);
305 zfcp_fsf_req_free(req);
306
307 atomic_inc(&adapter->stat_miss);
308 queue_work(adapter->work_queue, &adapter->stat_work);
309 }
310
311 static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
312 {
313 switch (req->qtcb->header.fsf_status_qual.word[0]) {
314 case FSF_SQ_FCP_RSP_AVAILABLE:
315 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
316 case FSF_SQ_NO_RETRY_POSSIBLE:
317 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
318 return;
319 case FSF_SQ_COMMAND_ABORTED:
320 req->status |= ZFCP_STATUS_FSFREQ_ABORTED;
321 break;
322 case FSF_SQ_NO_RECOM:
323 dev_err(&req->adapter->ccw_device->dev,
324 "The FCP adapter reported a problem "
325 "that cannot be recovered\n");
326 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1", req);
327 break;
328 }
329 /* all non-return statuses set FSFREQ_ERROR */
330 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
331 }
332
333 static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
334 {
335 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
336 return;
337
338 switch (req->qtcb->header.fsf_status) {
339 case FSF_UNKNOWN_COMMAND:
340 dev_err(&req->adapter->ccw_device->dev,
341 "The FCP adapter does not recognize the command 0x%x\n",
342 req->qtcb->header.fsf_command);
343 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1", req);
344 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
345 break;
346 case FSF_ADAPTER_STATUS_AVAILABLE:
347 zfcp_fsf_fsfstatus_qual_eval(req);
348 break;
349 }
350 }
351
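/*
 * Evaluate the protocol status in the QTCB prefix; anything other than
 * GOOD or FSF_STATUS_PRESENTED marks the request as failed and may
 * trigger adapter recovery or shutdown.
 */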
352 static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
353 {
354 struct zfcp_adapter *adapter = req->adapter;
355 struct fsf_qtcb *qtcb = req->qtcb;
356 union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual;
357
358 zfcp_dbf_hba_fsf_response(req);
359
360 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
361 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
362 ZFCP_STATUS_FSFREQ_RETRY; /* only for SCSI cmnds. */
363 return;
364 }
365
366 switch (qtcb->prefix.prot_status) {
367 case FSF_PROT_GOOD:
368 case FSF_PROT_FSF_STATUS_PRESENTED:
369 return;
370 case FSF_PROT_QTCB_VERSION_ERROR:
371 dev_err(&adapter->ccw_device->dev,
372 "QTCB version 0x%x not supported by FCP adapter "
373 "(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
374 psq->word[0], psq->word[1]);
375 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1", req);
376 break;
377 case FSF_PROT_ERROR_STATE:
378 case FSF_PROT_SEQ_NUMB_ERROR:
379 zfcp_erp_adapter_reopen(adapter, 0, "fspse_2", req);
380 req->status |= ZFCP_STATUS_FSFREQ_RETRY;
381 break;
382 case FSF_PROT_UNSUPP_QTCB_TYPE:
383 dev_err(&adapter->ccw_device->dev,
384 "The QTCB type is not supported by the FCP adapter\n");
385 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3", req);
386 break;
387 case FSF_PROT_HOST_CONNECTION_INITIALIZING:
388 atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
389 &adapter->status);
390 break;
391 case FSF_PROT_DUPLICATE_REQUEST_ID:
392 dev_err(&adapter->ccw_device->dev,
393 "0x%Lx is an ambiguous request identifier\n",
394 (unsigned long long)qtcb->bottom.support.req_handle);
395 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4", req);
396 break;
397 case FSF_PROT_LINK_DOWN:
398 zfcp_fsf_link_down_info_eval(req, "fspse_5",
399 &psq->link_down_info);
400 /* FIXME: reopening adapter now? better wait for link up */
401 zfcp_erp_adapter_reopen(adapter, 0, "fspse_6", req);
402 break;
403 case FSF_PROT_REEST_QUEUE:
404 /* All ports should be marked as ready to run again */
405 zfcp_erp_modify_adapter_status(adapter, "fspse_7", NULL,
406 ZFCP_STATUS_COMMON_RUNNING,
407 ZFCP_SET);
408 zfcp_erp_adapter_reopen(adapter,
409 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
410 ZFCP_STATUS_COMMON_ERP_FAILED,
411 "fspse_8", req);
412 break;
413 default:
414 dev_err(&adapter->ccw_device->dev,
415 "0x%x is not a valid transfer protocol status\n",
416 qtcb->prefix.prot_status);
417 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9", req);
418 }
419 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
420 }
421
422 /**
423 * zfcp_fsf_req_complete - process completion of an FSF request
424 * @req: The FSF request that has been completed.
425 *
426 * When a request has been completed, either by the FCP adapter or
427 * because it was dismissed due to a queue shutdown, this function
428 * is called to process the completion status and trigger further
429 * events related to the FSF request.
430 */
431 static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
432 {
433 if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) {
434 zfcp_fsf_status_read_handler(req);
435 return;
436 }
437
438 del_timer(&req->timer);
439 zfcp_fsf_protstatus_eval(req);
440 zfcp_fsf_fsfstatus_eval(req);
441 req->handler(req);
442
443 if (req->erp_action)
444 zfcp_erp_notify(req->erp_action, 0);
445
446 if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
447 zfcp_fsf_req_free(req);
448 else
449 complete(&req->completion);
450 }
451
452 /**
453 * zfcp_fsf_req_dismiss_all - dismiss all fsf requests
454 * @adapter: pointer to struct zfcp_adapter
455 *
456 * Never ever call this without shutting down the adapter first.
457 * Otherwise the adapter would continue using and corrupting s390 storage.
458 * A BUG_ON() call is included to ensure this is done.
459 * ERP is supposed to be the only user of this function.
460 */
461 void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
462 {
463 struct zfcp_fsf_req *req, *tmp;
464 unsigned long flags;
465 LIST_HEAD(remove_queue);
466 unsigned int i;
467
468 BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
469 spin_lock_irqsave(&adapter->req_list_lock, flags);
470 for (i = 0; i < REQUEST_LIST_SIZE; i++)
471 list_splice_init(&adapter->req_list[i], &remove_queue);
472 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
473
474 list_for_each_entry_safe(req, tmp, &remove_queue, list) {
475 list_del(&req->list);
476 req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
477 zfcp_fsf_req_complete(req);
478 }
479 }
480
481 static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
482 {
483 struct fsf_qtcb_bottom_config *bottom;
484 struct zfcp_adapter *adapter = req->adapter;
485 struct Scsi_Host *shost = adapter->scsi_host;
486
487 bottom = &req->qtcb->bottom.config;
488
489 if (req->data)
490 memcpy(req->data, bottom, sizeof(*bottom));
491
492 fc_host_node_name(shost) = bottom->nport_serv_param.wwnn;
493 fc_host_port_name(shost) = bottom->nport_serv_param.wwpn;
494 fc_host_port_id(shost) = bottom->s_id & ZFCP_DID_MASK;
495 fc_host_speed(shost) = bottom->fc_link_speed;
496 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
497
498 adapter->hydra_version = bottom->adapter_type;
499 adapter->timer_ticks = bottom->timer_interval;
500
501 if (fc_host_permanent_port_name(shost) == -1)
502 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
503
504 switch (bottom->fc_topology) {
505 case FSF_TOPO_P2P:
506 adapter->peer_d_id = bottom->peer_d_id & ZFCP_DID_MASK;
507 adapter->peer_wwpn = bottom->plogi_payload.wwpn;
508 adapter->peer_wwnn = bottom->plogi_payload.wwnn;
509 fc_host_port_type(shost) = FC_PORTTYPE_PTP;
510 break;
511 case FSF_TOPO_FABRIC:
512 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
513 break;
514 case FSF_TOPO_AL:
515 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
516 /* fall through */
517 default:
518 dev_err(&adapter->ccw_device->dev,
519 "Unknown or unsupported arbitrated loop "
520 "fibre channel topology detected\n");
521 zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1", req);
522 return -EIO;
523 }
524
525 return 0;
526 }
527
528 static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
529 {
530 struct zfcp_adapter *adapter = req->adapter;
531 struct fsf_qtcb *qtcb = req->qtcb;
532 struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
533 struct Scsi_Host *shost = adapter->scsi_host;
534
535 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
536 return;
537
538 adapter->fsf_lic_version = bottom->lic_version;
539 adapter->adapter_features = bottom->adapter_features;
540 adapter->connection_features = bottom->connection_features;
541 adapter->peer_wwpn = 0;
542 adapter->peer_wwnn = 0;
543 adapter->peer_d_id = 0;
544
545 switch (qtcb->header.fsf_status) {
546 case FSF_GOOD:
547 if (zfcp_fsf_exchange_config_evaluate(req))
548 return;
549
550 if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
551 dev_err(&adapter->ccw_device->dev,
552 "FCP adapter maximum QTCB size (%d bytes) "
553 "is too small\n",
554 bottom->max_qtcb_size);
555 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1", req);
556 return;
557 }
558 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
559 &adapter->status);
560 break;
561 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
562 fc_host_node_name(shost) = 0;
563 fc_host_port_name(shost) = 0;
564 fc_host_port_id(shost) = 0;
565 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
566 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
567 adapter->hydra_version = 0;
568
569 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
570 &adapter->status);
571
572 zfcp_fsf_link_down_info_eval(req, "fsecdh2",
573 &qtcb->header.fsf_status_qual.link_down_info);
574 break;
575 default:
576 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3", req);
577 return;
578 }
579
580 if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
581 adapter->hardware_version = bottom->hardware_version;
582 memcpy(fc_host_serial_number(shost), bottom->serial_number,
583 min(FC_SERIAL_NUMBER_SIZE, 17));
584 EBCASC(fc_host_serial_number(shost),
585 min(FC_SERIAL_NUMBER_SIZE, 17));
586 }
587
588 if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
589 dev_err(&adapter->ccw_device->dev,
590 "The FCP adapter only supports newer "
591 "control block versions\n");
592 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4", req);
593 return;
594 }
595 if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
596 dev_err(&adapter->ccw_device->dev,
597 "The FCP adapter only supports older "
598 "control block versions\n");
599 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5", req);
600 }
601 }
602
603 static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
604 {
605 struct zfcp_adapter *adapter = req->adapter;
606 struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
607 struct Scsi_Host *shost = adapter->scsi_host;
608
609 if (req->data)
610 memcpy(req->data, bottom, sizeof(*bottom));
611
612 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
613 fc_host_permanent_port_name(shost) = bottom->wwpn;
614 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
615 } else
616 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
617 fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
618 fc_host_supported_speeds(shost) = bottom->supported_speed;
619 }
620
621 static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
622 {
623 struct fsf_qtcb *qtcb = req->qtcb;
624
625 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
626 return;
627
628 switch (qtcb->header.fsf_status) {
629 case FSF_GOOD:
630 zfcp_fsf_exchange_port_evaluate(req);
631 break;
632 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
633 zfcp_fsf_exchange_port_evaluate(req);
634 zfcp_fsf_link_down_info_eval(req, "fsepdh1",
635 &qtcb->header.fsf_status_qual.link_down_info);
636 break;
637 }
638 }
639
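/* check for a free SBAL in the request queue; returns 1 with req_q_lock held, 0 otherwise */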
640 static int zfcp_fsf_sbal_check(struct zfcp_qdio *qdio)
641 {
642 struct zfcp_qdio_queue *req_q = &qdio->req_q;
643
644 spin_lock_bh(&qdio->req_q_lock);
645 if (atomic_read(&req_q->count))
646 return 1;
647 spin_unlock_bh(&qdio->req_q_lock);
648 return 0;
649 }
650
651 static int zfcp_fsf_req_sbal_get(struct zfcp_qdio *qdio)
652 {
653 struct zfcp_adapter *adapter = qdio->adapter;
654 long ret;
655
656 spin_unlock_bh(&qdio->req_q_lock);
657 ret = wait_event_interruptible_timeout(qdio->req_q_wq,
658 zfcp_fsf_sbal_check(qdio), 5 * HZ);
659 if (ret > 0)
660 return 0;
661 if (!ret) {
662 atomic_inc(&qdio->req_q_full);
663 /* assume hanging outbound queue, try queue recovery */
664 zfcp_erp_adapter_reopen(adapter, 0, "fsrsg_1", NULL);
665 }
666
667 spin_lock_bh(&qdio->req_q_lock);
668 return -EIO;
669 }
670
671 static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
672 {
673 struct zfcp_fsf_req *req;
674
675 if (likely(pool))
676 req = mempool_alloc(pool, GFP_ATOMIC);
677 else
678 req = kmalloc(sizeof(*req), GFP_ATOMIC);
679
680 if (unlikely(!req))
681 return NULL;
682
683 memset(req, 0, sizeof(*req));
684 req->pool = pool;
685 return req;
686 }
687
688 static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
689 {
690 struct fsf_qtcb *qtcb;
691
692 if (likely(pool))
693 qtcb = mempool_alloc(pool, GFP_ATOMIC);
694 else
695 qtcb = kmem_cache_alloc(zfcp_data.qtcb_cache, GFP_ATOMIC);
696
697 if (unlikely(!qtcb))
698 return NULL;
699
700 memset(qtcb, 0, sizeof(*qtcb));
701 return qtcb;
702 }
703
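/*
 * Allocate and initialize an FSF request: set up request id, timer,
 * first SBALE and, except for unsolicited status reads, the QTCB.
 */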
704 static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
705 u32 fsf_cmd, mempool_t *pool)
706 {
707 struct qdio_buffer_element *sbale;
708 struct zfcp_qdio_queue *req_q = &qdio->req_q;
709 struct zfcp_adapter *adapter = qdio->adapter;
710 struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);
711
712 if (unlikely(!req))
713 return ERR_PTR(-ENOMEM);
714
715 if (adapter->req_no == 0)
716 adapter->req_no++;
717
718 INIT_LIST_HEAD(&req->list);
719 init_timer(&req->timer);
720 init_completion(&req->completion);
721
722 req->adapter = adapter;
723 req->fsf_command = fsf_cmd;
724 req->req_id = adapter->req_no;
725 req->queue_req.sbal_number = 1;
726 req->queue_req.sbal_first = req_q->first;
727 req->queue_req.sbal_last = req_q->first;
728 req->queue_req.sbale_curr = 1;
729
730 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
731 sbale[0].addr = (void *) req->req_id;
732 sbale[0].flags |= SBAL_FLAGS0_COMMAND;
733
734 if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
735 if (likely(pool))
736 req->qtcb = zfcp_qtcb_alloc(adapter->pool.qtcb_pool);
737 else
738 req->qtcb = zfcp_qtcb_alloc(NULL);
739
740 if (unlikely(!req->qtcb)) {
741 zfcp_fsf_req_free(req);
742 return ERR_PTR(-ENOMEM);
743 }
744
745 req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
746 req->qtcb->prefix.req_id = req->req_id;
747 req->qtcb->prefix.ulp_info = 26;
748 req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command];
749 req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
750 req->qtcb->header.req_handle = req->req_id;
751 req->qtcb->header.fsf_command = req->fsf_command;
752 req->seq_no = adapter->fsf_req_seq_no;
753 req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
754 sbale[1].addr = (void *) req->qtcb;
755 sbale[1].length = sizeof(struct fsf_qtcb);
756 }
757
758 if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) {
759 zfcp_fsf_req_free(req);
760 return ERR_PTR(-EIO);
761 }
762
763 return req;
764 }
765
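/*
 * Insert the request into the adapter's request hash table and hand it
 * to QDIO; on send failure remove it again and trigger adapter recovery.
 */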
766 static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
767 {
768 struct zfcp_adapter *adapter = req->adapter;
769 struct zfcp_qdio *qdio = adapter->qdio;
770 unsigned long flags;
771 int idx;
772 int with_qtcb = (req->qtcb != NULL);
773
774 /* put allocated FSF request into hash table */
775 spin_lock_irqsave(&adapter->req_list_lock, flags);
776 idx = zfcp_reqlist_hash(req->req_id);
777 list_add_tail(&req->list, &adapter->req_list[idx]);
778 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
779
780 req->queue_req.qdio_outb_usage = atomic_read(&qdio->req_q.count);
781 req->issued = get_clock();
782 if (zfcp_qdio_send(qdio, &req->queue_req)) {
783 del_timer(&req->timer);
784 spin_lock_irqsave(&adapter->req_list_lock, flags);
785 /* lookup request again, list might have changed */
786 if (zfcp_reqlist_find_safe(adapter, req))
787 zfcp_reqlist_remove(adapter, req);
788 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
789 zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1", req);
790 return -EIO;
791 }
792
793 /* Don't increase for unsolicited status */
794 if (with_qtcb)
795 adapter->fsf_req_seq_no++;
796 adapter->req_no++;
797
798 return 0;
799 }
800
801 /**
802 * zfcp_fsf_status_read - send status read request
803 * @qdio: pointer to struct zfcp_qdio
804 *
805 * Returns: 0 on success, error otherwise
806 */
807 int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
808 {
809 struct zfcp_adapter *adapter = qdio->adapter;
810 struct zfcp_fsf_req *req;
811 struct fsf_status_read_buffer *sr_buf;
812 struct qdio_buffer_element *sbale;
813 int retval = -EIO;
814
815 spin_lock_bh(&qdio->req_q_lock);
816 if (zfcp_fsf_req_sbal_get(qdio))
817 goto out;
818
819 req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS,
820 adapter->pool.status_read_req);
821 if (IS_ERR(req)) {
822 retval = PTR_ERR(req);
823 goto out;
824 }
825
826 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
827 sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY;
828 req->queue_req.sbale_curr = 2;
829
830 sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC);
831 if (!sr_buf) {
832 retval = -ENOMEM;
833 goto failed_buf;
834 }
835 memset(sr_buf, 0, sizeof(*sr_buf));
836 req->data = sr_buf;
837 sbale = zfcp_qdio_sbale_curr(qdio, &req->queue_req);
838 sbale->addr = (void *) sr_buf;
839 sbale->length = sizeof(*sr_buf);
840
841 retval = zfcp_fsf_req_send(req);
842 if (retval)
843 goto failed_req_send;
844
845 goto out;
846
847 failed_req_send:
848 mempool_free(sr_buf, adapter->pool.status_read_data);
849 failed_buf:
850 zfcp_fsf_req_free(req);
851 zfcp_dbf_hba_fsf_unsol("fail", adapter->dbf, NULL);
852 out:
853 spin_unlock_bh(&qdio->req_q_lock);
854 return retval;
855 }
856
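/*
 * Evaluate the FSF status of an abort request and translate it into
 * abort succeeded, abort not needed or error.
 */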
857 static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
858 {
859 struct zfcp_unit *unit = req->data;
860 union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
861
862 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
863 return;
864
865 switch (req->qtcb->header.fsf_status) {
866 case FSF_PORT_HANDLE_NOT_VALID:
867 if (fsq->word[0] == fsq->word[1]) {
868 zfcp_erp_adapter_reopen(unit->port->adapter, 0,
869 "fsafch1", req);
870 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
871 }
872 break;
873 case FSF_LUN_HANDLE_NOT_VALID:
874 if (fsq->word[0] == fsq->word[1]) {
875 zfcp_erp_port_reopen(unit->port, 0, "fsafch2", req);
876 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
877 }
878 break;
879 case FSF_FCP_COMMAND_DOES_NOT_EXIST:
880 req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
881 break;
882 case FSF_PORT_BOXED:
883 zfcp_erp_port_boxed(unit->port, "fsafch3", req);
884 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
885 ZFCP_STATUS_FSFREQ_RETRY;
886 break;
887 case FSF_LUN_BOXED:
888 zfcp_erp_unit_boxed(unit, "fsafch4", req);
889 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
890 ZFCP_STATUS_FSFREQ_RETRY;
891 break;
892 case FSF_ADAPTER_STATUS_AVAILABLE:
893 switch (fsq->word[0]) {
894 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
895 zfcp_fc_test_link(unit->port);
896 /* fall through */
897 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
898 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
899 break;
900 }
901 break;
902 case FSF_GOOD:
903 req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
904 break;
905 }
906 }
907
908 /**
909 * zfcp_fsf_abort_fcp_command - abort running SCSI command
910 * @old_req_id: request id of the command which should be aborted
911 * @unit: pointer to struct zfcp_unit
912 * Returns: pointer to struct zfcp_fsf_req
913 */
914
915 struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
916 struct zfcp_unit *unit)
917 {
918 struct qdio_buffer_element *sbale;
919 struct zfcp_fsf_req *req = NULL;
920 struct zfcp_qdio *qdio = unit->port->adapter->qdio;
921
922 spin_lock_bh(&qdio->req_q_lock);
923 if (zfcp_fsf_req_sbal_get(qdio))
924 goto out;
925 req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
926 qdio->adapter->pool.scsi_abort);
927 if (IS_ERR(req)) {
928 req = NULL;
929 goto out;
930 }
931
932 if (unlikely(!(atomic_read(&unit->status) &
933 ZFCP_STATUS_COMMON_UNBLOCKED)))
934 goto out_error_free;
935
936 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
937 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
938 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
939
940 req->data = unit;
941 req->handler = zfcp_fsf_abort_fcp_command_handler;
942 req->qtcb->header.lun_handle = unit->handle;
943 req->qtcb->header.port_handle = unit->port->handle;
944 req->qtcb->bottom.support.req_handle = (u64) old_req_id;
945
946 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
947 if (!zfcp_fsf_req_send(req))
948 goto out;
949
950 out_error_free:
951 zfcp_fsf_req_free(req);
952 req = NULL;
953 out:
954 spin_unlock_bh(&qdio->req_q_lock);
955 return req;
956 }
957
958 static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
959 {
960 struct zfcp_adapter *adapter = req->adapter;
961 struct zfcp_send_ct *send_ct = req->data;
962 struct fsf_qtcb_header *header = &req->qtcb->header;
963
964 send_ct->status = -EINVAL;
965
966 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
967 goto skip_fsfstatus;
968
969 switch (header->fsf_status) {
970 case FSF_GOOD:
971 zfcp_dbf_san_ct_response(req);
972 send_ct->status = 0;
973 break;
974 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
975 zfcp_fsf_class_not_supp(req);
976 break;
977 case FSF_ADAPTER_STATUS_AVAILABLE:
978 switch (header->fsf_status_qual.word[0]){
979 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
980 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
981 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
982 break;
983 }
984 break;
985 case FSF_ACCESS_DENIED:
986 break;
987 case FSF_PORT_BOXED:
988 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
989 ZFCP_STATUS_FSFREQ_RETRY;
990 break;
991 case FSF_PORT_HANDLE_NOT_VALID:
992 zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req);
993 /* fall through */
994 case FSF_GENERIC_COMMAND_REJECTED:
995 case FSF_PAYLOAD_SIZE_MISMATCH:
996 case FSF_REQUEST_SIZE_TOO_LARGE:
997 case FSF_RESPONSE_SIZE_TOO_LARGE:
998 case FSF_SBAL_MISMATCH:
999 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1000 break;
1001 }
1002
1003 skip_fsfstatus:
1004 if (send_ct->handler)
1005 send_ct->handler(send_ct->handler_data);
1006 }
1007
1008 static void zfcp_fsf_setup_ct_els_unchained(struct qdio_buffer_element *sbale,
1009 struct scatterlist *sg_req,
1010 struct scatterlist *sg_resp)
1011 {
1012 sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ;
1013 sbale[2].addr = sg_virt(sg_req);
1014 sbale[2].length = sg_req->length;
1015 sbale[3].addr = sg_virt(sg_resp);
1016 sbale[3].length = sg_resp->length;
1017 sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
1018 }
1019
1020 static int zfcp_fsf_one_sbal(struct scatterlist *sg)
1021 {
1022 return sg_is_last(sg) && sg->length <= PAGE_SIZE;
1023 }
1024
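/*
 * Map the CT/ELS request and response buffers to SBALs: use a single
 * unchained SBAL when the buffers fit, fall back to chained SBALs if
 * the adapter supports them, otherwise fail with -EOPNOTSUPP.
 */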
1025 static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
1026 struct scatterlist *sg_req,
1027 struct scatterlist *sg_resp,
1028 int max_sbals)
1029 {
1030 struct zfcp_adapter *adapter = req->adapter;
1031 struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(adapter->qdio,
1032 &req->queue_req);
1033 u32 feat = adapter->adapter_features;
1034 int bytes;
1035
1036 if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) {
1037 if (!zfcp_fsf_one_sbal(sg_req) || !zfcp_fsf_one_sbal(sg_resp))
1038 return -EOPNOTSUPP;
1039
1040 zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp);
1041 return 0;
1042 }
1043
1044 /* use single, unchained SBAL if it can hold the request */
1045 if (zfcp_fsf_one_sbal(sg_req) && zfcp_fsf_one_sbal(sg_resp)) {
1046 zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp);
1047 return 0;
1048 }
1049
1050 bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req,
1051 SBAL_FLAGS0_TYPE_WRITE_READ,
1052 sg_req, max_sbals);
1053 if (bytes <= 0)
1054 return -EIO;
1055 req->qtcb->bottom.support.req_buf_length = bytes;
1056 req->queue_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
1057
1058 bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req,
1059 SBAL_FLAGS0_TYPE_WRITE_READ,
1060 sg_resp, max_sbals);
1061 if (bytes <= 0)
1062 return -EIO;
1063 req->qtcb->bottom.support.resp_buf_length = bytes;
1064
1065 return 0;
1066 }
1067
1068 /**
1069 * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
1070 * @ct: pointer to struct zfcp_send_ct with data for request
1071 * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
1072 */
1073 int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool)
1074 {
1075 struct zfcp_wka_port *wka_port = ct->wka_port;
1076 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1077 struct zfcp_fsf_req *req;
1078 int ret = -EIO;
1079
1080 spin_lock_bh(&qdio->req_q_lock);
1081 if (zfcp_fsf_req_sbal_get(qdio))
1082 goto out;
1083
1084 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC, pool);
1085
1086 if (IS_ERR(req)) {
1087 ret = PTR_ERR(req);
1088 goto out;
1089 }
1090
1091 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1092 ret = zfcp_fsf_setup_ct_els_sbals(req, ct->req, ct->resp,
1093 FSF_MAX_SBALS_PER_REQ);
1094 if (ret)
1095 goto failed_send;
1096
1097 req->handler = zfcp_fsf_send_ct_handler;
1098 req->qtcb->header.port_handle = wka_port->handle;
1099 req->qtcb->bottom.support.service_class = FSF_CLASS_3;
1100 req->qtcb->bottom.support.timeout = ct->timeout;
1101 req->data = ct;
1102
1103 zfcp_dbf_san_ct_request(req);
1104 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1105
1106 ret = zfcp_fsf_req_send(req);
1107 if (ret)
1108 goto failed_send;
1109
1110 goto out;
1111
1112 failed_send:
1113 zfcp_fsf_req_free(req);
1114 out:
1115 spin_unlock_bh(&qdio->req_q_lock);
1116 return ret;
1117 }
1118
1119 static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
1120 {
1121 struct zfcp_send_els *send_els = req->data;
1122 struct zfcp_port *port = send_els->port;
1123 struct fsf_qtcb_header *header = &req->qtcb->header;
1124
1125 send_els->status = -EINVAL;
1126
1127 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1128 goto skip_fsfstatus;
1129
1130 switch (header->fsf_status) {
1131 case FSF_GOOD:
1132 zfcp_dbf_san_els_response(req);
1133 send_els->status = 0;
1134 break;
1135 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
1136 zfcp_fsf_class_not_supp(req);
1137 break;
1138 case FSF_ADAPTER_STATUS_AVAILABLE:
1139 switch (header->fsf_status_qual.word[0]){
1140 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1141 if (port && (send_els->ls_code != ZFCP_LS_ADISC))
1142 zfcp_fc_test_link(port);
1143 /* fall through */
1144 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1145 case FSF_SQ_RETRY_IF_POSSIBLE:
1146 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1147 break;
1148 }
1149 break;
1150 case FSF_ELS_COMMAND_REJECTED:
1151 case FSF_PAYLOAD_SIZE_MISMATCH:
1152 case FSF_REQUEST_SIZE_TOO_LARGE:
1153 case FSF_RESPONSE_SIZE_TOO_LARGE:
1154 break;
1155 case FSF_ACCESS_DENIED:
1156 if (port)
1157 zfcp_fsf_access_denied_port(req, port);
1158 break;
1159 case FSF_SBAL_MISMATCH:
1160 /* should never occur, avoided in zfcp_fsf_send_els */
1161 /* fall through */
1162 default:
1163 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1164 break;
1165 }
1166 skip_fsfstatus:
1167 if (send_els->handler)
1168 send_els->handler(send_els->handler_data);
1169 }
1170
1171 /**
1172 * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
1173 * @els: pointer to struct zfcp_send_els with data for the command
1174 */
1175 int zfcp_fsf_send_els(struct zfcp_send_els *els)
1176 {
1177 struct zfcp_fsf_req *req;
1178 struct zfcp_qdio *qdio = els->adapter->qdio;
1179 struct fsf_qtcb_bottom_support *bottom;
1180 int ret = -EIO;
1181
1182 spin_lock_bh(&qdio->req_q_lock);
1183 if (zfcp_fsf_req_sbal_get(qdio))
1184 goto out;
1185
1186 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS, NULL);
1187
1188 if (IS_ERR(req)) {
1189 ret = PTR_ERR(req);
1190 goto out;
1191 }
1192
1193 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1194 ret = zfcp_fsf_setup_ct_els_sbals(req, els->req, els->resp, 2);
1195
1196 if (ret)
1197 goto failed_send;
1198
1199 bottom = &req->qtcb->bottom.support;
1200 req->handler = zfcp_fsf_send_els_handler;
1201 bottom->d_id = els->d_id;
1202 bottom->service_class = FSF_CLASS_3;
1203 bottom->timeout = 2 * R_A_TOV;
1204 req->data = els;
1205
1206 zfcp_dbf_san_els_request(req);
1207
1208 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1209 ret = zfcp_fsf_req_send(req);
1210 if (ret)
1211 goto failed_send;
1212
1213 goto out;
1214
1215 failed_send:
1216 zfcp_fsf_req_free(req);
1217 out:
1218 spin_unlock_bh(&qdio->req_q_lock);
1219 return ret;
1220 }
1221
1222 int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1223 {
1224 struct qdio_buffer_element *sbale;
1225 struct zfcp_fsf_req *req;
1226 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1227 int retval = -EIO;
1228
1229 spin_lock_bh(&qdio->req_q_lock);
1230 if (zfcp_fsf_req_sbal_get(qdio))
1231 goto out;
1232
1233 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1234 qdio->adapter->pool.erp_req);
1235
1236 if (IS_ERR(req)) {
1237 retval = PTR_ERR(req);
1238 goto out;
1239 }
1240
1241 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1242 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1243 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1244 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1245
1246 req->qtcb->bottom.config.feature_selection =
1247 FSF_FEATURE_CFDC |
1248 FSF_FEATURE_LUN_SHARING |
1249 FSF_FEATURE_NOTIFICATION_LOST |
1250 FSF_FEATURE_UPDATE_ALERT;
1251 req->erp_action = erp_action;
1252 req->handler = zfcp_fsf_exchange_config_data_handler;
1253 erp_action->fsf_req = req;
1254
1255 zfcp_fsf_start_erp_timer(req);
1256 retval = zfcp_fsf_req_send(req);
1257 if (retval) {
1258 zfcp_fsf_req_free(req);
1259 erp_action->fsf_req = NULL;
1260 }
1261 out:
1262 spin_unlock_bh(&qdio->req_q_lock);
1263 return retval;
1264 }
1265
1266 int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
1267 struct fsf_qtcb_bottom_config *data)
1268 {
1269 struct qdio_buffer_element *sbale;
1270 struct zfcp_fsf_req *req = NULL;
1271 int retval = -EIO;
1272
1273 spin_lock_bh(&qdio->req_q_lock);
1274 if (zfcp_fsf_req_sbal_get(qdio))
1275 goto out_unlock;
1276
1277 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, NULL);
1278
1279 if (IS_ERR(req)) {
1280 retval = PTR_ERR(req);
1281 goto out_unlock;
1282 }
1283
1284 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1285 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1286 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1287 req->handler = zfcp_fsf_exchange_config_data_handler;
1288
1289 req->qtcb->bottom.config.feature_selection =
1290 FSF_FEATURE_CFDC |
1291 FSF_FEATURE_LUN_SHARING |
1292 FSF_FEATURE_NOTIFICATION_LOST |
1293 FSF_FEATURE_UPDATE_ALERT;
1294
1295 if (data)
1296 req->data = data;
1297
1298 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1299 retval = zfcp_fsf_req_send(req);
1300 spin_unlock_bh(&qdio->req_q_lock);
1301 if (!retval)
1302 wait_for_completion(&req->completion);
1303
1304 zfcp_fsf_req_free(req);
1305 return retval;
1306
1307 out_unlock:
1308 spin_unlock_bh(&qdio->req_q_lock);
1309 return retval;
1310 }
1311
1312 /**
1313 * zfcp_fsf_exchange_port_data - request information about local port
1314 * @erp_action: ERP action for the adapter for which port data is requested
1315 * Returns: 0 on success, error otherwise
1316 */
1317 int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1318 {
1319 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1320 struct qdio_buffer_element *sbale;
1321 struct zfcp_fsf_req *req;
1322 int retval = -EIO;
1323
1324 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1325 return -EOPNOTSUPP;
1326
1327 spin_lock_bh(&qdio->req_q_lock);
1328 if (zfcp_fsf_req_sbal_get(qdio))
1329 goto out;
1330
1331 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1332 qdio->adapter->pool.erp_req);
1333
1334 if (IS_ERR(req)) {
1335 retval = PTR_ERR(req);
1336 goto out;
1337 }
1338
1339 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1340 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1341 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1342 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1343
1344 req->handler = zfcp_fsf_exchange_port_data_handler;
1345 req->erp_action = erp_action;
1346 erp_action->fsf_req = req;
1347
1348 zfcp_fsf_start_erp_timer(req);
1349 retval = zfcp_fsf_req_send(req);
1350 if (retval) {
1351 zfcp_fsf_req_free(req);
1352 erp_action->fsf_req = NULL;
1353 }
1354 out:
1355 spin_unlock_bh(&qdio->req_q_lock);
1356 return retval;
1357 }
1358
1359 /**
1360 * zfcp_fsf_exchange_port_data_sync - request information about local port
1361 * @qdio: pointer to struct zfcp_qdio
1362 * @data: pointer to struct fsf_qtcb_bottom_port
1363 * Returns: 0 on success, error otherwise
1364 */
1365 int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
1366 struct fsf_qtcb_bottom_port *data)
1367 {
1368 struct qdio_buffer_element *sbale;
1369 struct zfcp_fsf_req *req = NULL;
1370 int retval = -EIO;
1371
1372 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1373 return -EOPNOTSUPP;
1374
1375 spin_lock_bh(&qdio->req_q_lock);
1376 if (zfcp_fsf_req_sbal_get(qdio))
1377 goto out_unlock;
1378
1379 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, NULL);
1380
1381 if (IS_ERR(req)) {
1382 retval = PTR_ERR(req);
1383 goto out_unlock;
1384 }
1385
1386 if (data)
1387 req->data = data;
1388
1389 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1390 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1391 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1392
1393 req->handler = zfcp_fsf_exchange_port_data_handler;
1394 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1395 retval = zfcp_fsf_req_send(req);
1396 spin_unlock_bh(&qdio->req_q_lock);
1397
1398 if (!retval)
1399 wait_for_completion(&req->completion);
1400
1401 zfcp_fsf_req_free(req);
1402
1403 return retval;
1404
1405 out_unlock:
1406 spin_unlock_bh(&qdio->req_q_lock);
1407 return retval;
1408 }
1409
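/*
 * Handler for open port: on success store the port handle and evaluate
 * the PLOGI payload returned with the response; on error mark the
 * request or port as failed accordingly.
 */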
1410 static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1411 {
1412 struct zfcp_port *port = req->data;
1413 struct fsf_qtcb_header *header = &req->qtcb->header;
1414 struct fsf_plogi *plogi;
1415
1416 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1417 goto out;
1418
1419 switch (header->fsf_status) {
1420 case FSF_PORT_ALREADY_OPEN:
1421 break;
1422 case FSF_ACCESS_DENIED:
1423 zfcp_fsf_access_denied_port(req, port);
1424 break;
1425 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1426 dev_warn(&req->adapter->ccw_device->dev,
1427 "Not enough FCP adapter resources to open "
1428 "remote port 0x%016Lx\n",
1429 (unsigned long long)port->wwpn);
1430 zfcp_erp_port_failed(port, "fsoph_1", req);
1431 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1432 break;
1433 case FSF_ADAPTER_STATUS_AVAILABLE:
1434 switch (header->fsf_status_qual.word[0]) {
1435 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1436 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1437 case FSF_SQ_NO_RETRY_POSSIBLE:
1438 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1439 break;
1440 }
1441 break;
1442 case FSF_GOOD:
1443 port->handle = header->port_handle;
1444 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN |
1445 ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1446 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1447 ZFCP_STATUS_COMMON_ACCESS_BOXED,
1448 &port->status);
1449 /* check whether D_ID has changed during open */
1450 /*
1451 * FIXME: This check is not airtight, as the FCP channel does
1452 * not monitor closures of target port connections caused on
1453 * the remote side. Thus, it might miss out on invalidating
1454 * locally cached WWPNs (and other N_Port parameters) of gone
1455 * target ports. So, our heroic attempt to make things safe
1456 * could be undermined by 'open port' response data tagged with
1457 * obsolete WWPNs. This is another reason to monitor potential
1458 * connection closures ourselves, at least by interpreting
1459 * incoming ELS frames and unsolicited status. One should also be
1460 * able to cross-check by means of another GID_PN straight after
1461 * a port has been opened. Alternatively, an ADISC/PDISC ELS
1462 * should suffice as well.
1463 */
1464 plogi = (struct fsf_plogi *) req->qtcb->bottom.support.els;
1465 if (req->qtcb->bottom.support.els1_length >=
1466 FSF_PLOGI_MIN_LEN) {
1467 if (plogi->serv_param.wwpn != port->wwpn)
1468 port->d_id = 0;
1469 else {
1470 port->wwnn = plogi->serv_param.wwnn;
1471 zfcp_fc_plogi_evaluate(port, plogi);
1472 }
1473 }
1474 break;
1475 case FSF_UNKNOWN_OP_SUBTYPE:
1476 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1477 break;
1478 }
1479
1480 out:
1481 zfcp_port_put(port);
1482 }
1483
1484 /**
1485 * zfcp_fsf_open_port - create and send open port request
1486 * @erp_action: pointer to struct zfcp_erp_action
1487 * Returns: 0 on success, error otherwise
1488 */
1489 int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1490 {
1491 struct qdio_buffer_element *sbale;
1492 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1493 struct zfcp_port *port = erp_action->port;
1494 struct zfcp_fsf_req *req;
1495 int retval = -EIO;
1496
1497 spin_lock_bh(&qdio->req_q_lock);
1498 if (zfcp_fsf_req_sbal_get(qdio))
1499 goto out;
1500
1501 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1502 qdio->adapter->pool.erp_req);
1503
1504 if (IS_ERR(req)) {
1505 retval = PTR_ERR(req);
1506 goto out;
1507 }
1508
1509 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1510 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1511 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1512 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1513
1514 req->handler = zfcp_fsf_open_port_handler;
1515 req->qtcb->bottom.support.d_id = port->d_id;
1516 req->data = port;
1517 req->erp_action = erp_action;
1518 erp_action->fsf_req = req;
1519 zfcp_port_get(port);
1520
1521 zfcp_fsf_start_erp_timer(req);
1522 retval = zfcp_fsf_req_send(req);
1523 if (retval) {
1524 zfcp_fsf_req_free(req);
1525 erp_action->fsf_req = NULL;
1526 zfcp_port_put(port);
1527 }
1528 out:
1529 spin_unlock_bh(&qdio->req_q_lock);
1530 return retval;
1531 }
1532
1533 static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
1534 {
1535 struct zfcp_port *port = req->data;
1536
1537 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1538 return;
1539
1540 switch (req->qtcb->header.fsf_status) {
1541 case FSF_PORT_HANDLE_NOT_VALID:
1542 zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1", req);
1543 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1544 break;
1545 case FSF_ADAPTER_STATUS_AVAILABLE:
1546 break;
1547 case FSF_GOOD:
1548 zfcp_erp_modify_port_status(port, "fscph_2", req,
1549 ZFCP_STATUS_COMMON_OPEN,
1550 ZFCP_CLEAR);
1551 break;
1552 }
1553 }
1554
1555 /**
1556 * zfcp_fsf_close_port - create and send close port request
1557 * @erp_action: pointer to struct zfcp_erp_action
1558 * Returns: 0 on success, error otherwise
1559 */
1560 int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1561 {
1562 struct qdio_buffer_element *sbale;
1563 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1564 struct zfcp_fsf_req *req;
1565 int retval = -EIO;
1566
1567 spin_lock_bh(&qdio->req_q_lock);
1568 if (zfcp_fsf_req_sbal_get(qdio))
1569 goto out;
1570
1571 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1572 qdio->adapter->pool.erp_req);
1573
1574 if (IS_ERR(req)) {
1575 retval = PTR_ERR(req);
1576 goto out;
1577 }
1578
1579 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1580 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1581 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1582 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1583
1584 req->handler = zfcp_fsf_close_port_handler;
1585 req->data = erp_action->port;
1586 req->erp_action = erp_action;
1587 req->qtcb->header.port_handle = erp_action->port->handle;
1588 erp_action->fsf_req = req;
1589
1590 zfcp_fsf_start_erp_timer(req);
1591 retval = zfcp_fsf_req_send(req);
1592 if (retval) {
1593 zfcp_fsf_req_free(req);
1594 erp_action->fsf_req = NULL;
1595 }
1596 out:
1597 spin_unlock_bh(&qdio->req_q_lock);
1598 return retval;
1599 }
1600
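/*
 * Handler for open WKA port: record the port handle and set the WKA
 * port online, or mark it offline on failure; then wake up waiters.
 */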
1601 static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
1602 {
1603 struct zfcp_wka_port *wka_port = req->data;
1604 struct fsf_qtcb_header *header = &req->qtcb->header;
1605
1606 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
1607 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
1608 goto out;
1609 }
1610
1611 switch (header->fsf_status) {
1612 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1613 dev_warn(&req->adapter->ccw_device->dev,
1614 "Opening WKA port 0x%x failed\n", wka_port->d_id);
1615 /* fall through */
1616 case FSF_ADAPTER_STATUS_AVAILABLE:
1617 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1618 /* fall through */
1619 case FSF_ACCESS_DENIED:
1620 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
1621 break;
1622 case FSF_GOOD:
1623 wka_port->handle = header->port_handle;
1624 /* fall through */
1625 case FSF_PORT_ALREADY_OPEN:
1626 wka_port->status = ZFCP_WKA_PORT_ONLINE;
1627 }
1628 out:
1629 wake_up(&wka_port->completion_wq);
1630 }
1631
1632 /**
1633 * zfcp_fsf_open_wka_port - create and send open wka-port request
1634 * @wka_port: pointer to struct zfcp_wka_port
1635 * Returns: 0 on success, error otherwise
1636 */
1637 int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port)
1638 {
1639 struct qdio_buffer_element *sbale;
1640 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1641 struct zfcp_fsf_req *req;
1642 int retval = -EIO;
1643
1644 spin_lock_bh(&qdio->req_q_lock);
1645 if (zfcp_fsf_req_sbal_get(qdio))
1646 goto out;
1647
1648 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1649 qdio->adapter->pool.erp_req);
1650
1651 if (unlikely(IS_ERR(req))) {
1652 retval = PTR_ERR(req);
1653 goto out;
1654 }
1655
1656 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1657 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1658 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1659 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1660
1661 req->handler = zfcp_fsf_open_wka_port_handler;
1662 req->qtcb->bottom.support.d_id = wka_port->d_id;
1663 req->data = wka_port;
1664
1665 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1666 retval = zfcp_fsf_req_send(req);
1667 if (retval)
1668 zfcp_fsf_req_free(req);
1669 out:
1670 spin_unlock_bh(&qdio->req_q_lock);
1671 return retval;
1672 }
1673
1674 static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
1675 {
1676 struct zfcp_wka_port *wka_port = req->data;
1677
1678 if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
1679 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1680 zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1", req);
1681 }
1682
1683 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
1684 wake_up(&wka_port->completion_wq);
1685 }
1686
1687 /**
1688 * zfcp_fsf_close_wka_port - create and send close wka port request
1690 * @wka_port: pointer to struct zfcp_wka_port
1690 * Returns: 0 on success, error otherwise
1691 */
1692 int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port)
1693 {
1694 struct qdio_buffer_element *sbale;
1695 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1696 struct zfcp_fsf_req *req;
1697 int retval = -EIO;
1698
1699 spin_lock_bh(&qdio->req_q_lock);
1700 if (zfcp_fsf_req_sbal_get(qdio))
1701 goto out;
1702
1703 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1704 qdio->adapter->pool.erp_req);
1705
1706 if (unlikely(IS_ERR(req))) {
1707 retval = PTR_ERR(req);
1708 goto out;
1709 }
1710
1711 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1712 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1713 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1714 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1715
1716 req->handler = zfcp_fsf_close_wka_port_handler;
1717 req->data = wka_port;
1718 req->qtcb->header.port_handle = wka_port->handle;
1719
1720 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1721 retval = zfcp_fsf_req_send(req);
1722 if (retval)
1723 zfcp_fsf_req_free(req);
1724 out:
1725 spin_unlock_bh(&qdio->req_q_lock);
1726 return retval;
1727 }
1728
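/*
 * Handler for close physical port: clear the physical-open status of
 * the port and the open status of its units; on error trigger port or
 * adapter recovery.
 */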
1729 static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
1730 {
1731 struct zfcp_port *port = req->data;
1732 struct fsf_qtcb_header *header = &req->qtcb->header;
1733 struct zfcp_unit *unit;
1734
1735 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1736 return;
1737
1738 switch (header->fsf_status) {
1739 case FSF_PORT_HANDLE_NOT_VALID:
1740 zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1", req);
1741 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1742 break;
1743 case FSF_ACCESS_DENIED:
1744 zfcp_fsf_access_denied_port(req, port);
1745 break;
1746 case FSF_PORT_BOXED:
1747 /* can't use generic zfcp_erp_modify_port_status because
1748 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
1749 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1750 list_for_each_entry(unit, &port->unit_list_head, list)
1751 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
1752 &unit->status);
1753 zfcp_erp_port_boxed(port, "fscpph2", req);
1754 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
1755 ZFCP_STATUS_FSFREQ_RETRY;
1756
1757 break;
1758 case FSF_ADAPTER_STATUS_AVAILABLE:
1759 switch (header->fsf_status_qual.word[0]) {
1760 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1761 /* fall through */
1762 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1763 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1764 break;
1765 }
1766 break;
1767 case FSF_GOOD:
1768 /* can't use generic zfcp_erp_modify_port_status because
1769 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
1770 */
1771 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1772 list_for_each_entry(unit, &port->unit_list_head, list)
1773 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
1774 &unit->status);
1775 break;
1776 }
1777 }
1778
1779 /**
1780 * zfcp_fsf_close_physical_port - close physical port
1781 * @erp_action: pointer to struct zfcp_erp_action
1782 * Returns: 0 on success
1783 */
1784 int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1785 {
1786 struct qdio_buffer_element *sbale;
1787 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1788 struct zfcp_fsf_req *req;
1789 int retval = -EIO;
1790
1791 spin_lock_bh(&qdio->req_q_lock);
1792 if (zfcp_fsf_req_sbal_get(qdio))
1793 goto out;
1794
1795 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
1796 qdio->adapter->pool.erp_req);
1797
1798 if (IS_ERR(req)) {
1799 retval = PTR_ERR(req);
1800 goto out;
1801 }
1802
1803 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1804 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1805 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1806 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1807
1808 req->data = erp_action->port;
1809 req->qtcb->header.port_handle = erp_action->port->handle;
1810 req->erp_action = erp_action;
1811 req->handler = zfcp_fsf_close_physical_port_handler;
1812 erp_action->fsf_req = req;
1813
1814 zfcp_fsf_start_erp_timer(req);
1815 retval = zfcp_fsf_req_send(req);
1816 if (retval) {
1817 zfcp_fsf_req_free(req);
1818 erp_action->fsf_req = NULL;
1819 }
1820 out:
1821 spin_unlock_bh(&qdio->req_q_lock);
1822 return retval;
1823 }
1824
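/*
 * Handler for open LUN: store the LUN handle and, if the adapter runs
 * without NPIV and supports LUN sharing, evaluate the exclusive and
 * read-write access information returned for the unit.
 */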
1825 static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1826 {
1827 struct zfcp_adapter *adapter = req->adapter;
1828 struct zfcp_unit *unit = req->data;
1829 struct fsf_qtcb_header *header = &req->qtcb->header;
1830 struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
1831 struct fsf_queue_designator *queue_designator =
1832 &header->fsf_status_qual.fsf_queue_designator;
1833 int exclusive, readwrite;
1834
1835 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1836 return;
1837
1838 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1839 ZFCP_STATUS_COMMON_ACCESS_BOXED |
1840 ZFCP_STATUS_UNIT_SHARED |
1841 ZFCP_STATUS_UNIT_READONLY,
1842 &unit->status);
1843
1844 switch (header->fsf_status) {
1845
1846 case FSF_PORT_HANDLE_NOT_VALID:
1847 zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fsouh_1", req);
1848 /* fall through */
1849 case FSF_LUN_ALREADY_OPEN:
1850 break;
1851 case FSF_ACCESS_DENIED:
1852 zfcp_fsf_access_denied_unit(req, unit);
1853 atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status);
1854 atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
1855 break;
1856 case FSF_PORT_BOXED:
1857 zfcp_erp_port_boxed(unit->port, "fsouh_2", req);
1858 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
1859 ZFCP_STATUS_FSFREQ_RETRY;
1860 break;
1861 case FSF_LUN_SHARING_VIOLATION:
1862 if (header->fsf_status_qual.word[0])
1863 dev_warn(&adapter->ccw_device->dev,
1864 "LUN 0x%Lx on port 0x%Lx is already in "
1865 "use by CSS%d, MIF Image ID %x\n",
1866 (unsigned long long)unit->fcp_lun,
1867 (unsigned long long)unit->port->wwpn,
1868 queue_designator->cssid,
1869 queue_designator->hla);
1870 else
1871 zfcp_act_eval_err(adapter,
1872 header->fsf_status_qual.word[2]);
1873 zfcp_erp_unit_access_denied(unit, "fsouh_3", req);
1874 atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status);
1875 atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
1876 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1877 break;
1878 case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
1879 dev_warn(&adapter->ccw_device->dev,
1880 "No handle is available for LUN "
1881 "0x%016Lx on port 0x%016Lx\n",
1882 (unsigned long long)unit->fcp_lun,
1883 (unsigned long long)unit->port->wwpn);
1884 zfcp_erp_unit_failed(unit, "fsouh_4", req);
1885 /* fall through */
1886 case FSF_INVALID_COMMAND_OPTION:
1887 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1888 break;
1889 case FSF_ADAPTER_STATUS_AVAILABLE:
1890 switch (header->fsf_status_qual.word[0]) {
1891 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1892 zfcp_fc_test_link(unit->port);
1893 /* fall through */
1894 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1895 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1896 break;
1897 }
1898 break;
1899
1900 case FSF_GOOD:
1901 unit->handle = header->lun_handle;
1902 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
1903
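/* Outside NPIV mode, adapters with LUN sharing support report access
 * information; evaluate it to mark the unit shared and/or read-only. */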
1904 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) &&
1905 (adapter->adapter_features & FSF_FEATURE_LUN_SHARING) &&
1906 !zfcp_ccw_priv_sch(adapter)) {
1907 exclusive = (bottom->lun_access_info &
1908 FSF_UNIT_ACCESS_EXCLUSIVE);
1909 readwrite = (bottom->lun_access_info &
1910 FSF_UNIT_ACCESS_OUTBOUND_TRANSFER);
1911
1912 if (!exclusive)
1913 atomic_set_mask(ZFCP_STATUS_UNIT_SHARED,
1914 &unit->status);
1915
1916 if (!readwrite) {
1917 atomic_set_mask(ZFCP_STATUS_UNIT_READONLY,
1918 &unit->status);
1919 dev_info(&adapter->ccw_device->dev,
1920 "SCSI device at LUN 0x%016Lx on port "
1921 "0x%016Lx opened read-only\n",
1922 (unsigned long long)unit->fcp_lun,
1923 (unsigned long long)unit->port->wwpn);
1924 }
1925
1926 if (exclusive && !readwrite) {
1927 dev_err(&adapter->ccw_device->dev,
1928 "Exclusive read-only access not "
1929 "supported (unit 0x%016Lx, "
1930 "port 0x%016Lx)\n",
1931 (unsigned long long)unit->fcp_lun,
1932 (unsigned long long)unit->port->wwpn);
1933 zfcp_erp_unit_failed(unit, "fsouh_5", req);
1934 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1935 zfcp_erp_unit_shutdown(unit, 0, "fsouh_6", req);
1936 } else if (!exclusive && readwrite) {
1937 dev_err(&adapter->ccw_device->dev,
1938 "Shared read-write access not "
1939 "supported (unit 0x%016Lx, port "
1940 "0x%016Lx)\n",
1941 (unsigned long long)unit->fcp_lun,
1942 (unsigned long long)unit->port->wwpn);
1943 zfcp_erp_unit_failed(unit, "fsouh_7", req);
1944 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1945 zfcp_erp_unit_shutdown(unit, 0, "fsouh_8", req);
1946 }
1947 }
1948 break;
1949 }
1950 }
1951
1952 /**
1953 * zfcp_fsf_open_unit - open unit
1954 * @erp_action: pointer to struct zfcp_erp_action
1955 * Returns: 0 on success, error otherwise
1956 */
1957 int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
1958 {
1959 struct qdio_buffer_element *sbale;
1960 struct zfcp_adapter *adapter = erp_action->adapter;
1961 struct zfcp_qdio *qdio = adapter->qdio;
1962 struct zfcp_fsf_req *req;
1963 int retval = -EIO;
1964
1965 spin_lock_bh(&qdio->req_q_lock);
1966 if (zfcp_fsf_req_sbal_get(qdio))
1967 goto out;
1968
1969 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
1970 adapter->pool.erp_req);
1971
1972 if (IS_ERR(req)) {
1973 retval = PTR_ERR(req);
1974 goto out;
1975 }
1976
1977 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1978 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1979 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1980 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1981
1982 req->qtcb->header.port_handle = erp_action->port->handle;
1983 req->qtcb->bottom.support.fcp_lun = erp_action->unit->fcp_lun;
1984 req->handler = zfcp_fsf_open_unit_handler;
1985 req->data = erp_action->unit;
1986 req->erp_action = erp_action;
1987 erp_action->fsf_req = req;
1988
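/* Outside NPIV mode, open the LUN with boxing suppressed. */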
1989 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
1990 req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
1991
1992 zfcp_fsf_start_erp_timer(req);
1993 retval = zfcp_fsf_req_send(req);
1994 if (retval) {
1995 zfcp_fsf_req_free(req);
1996 erp_action->fsf_req = NULL;
1997 }
1998 out:
1999 spin_unlock_bh(&qdio->req_q_lock);
2000 return retval;
2001 }
2002
2003 static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
2004 {
2005 struct zfcp_unit *unit = req->data;
2006
2007 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
2008 return;
2009
2010 switch (req->qtcb->header.fsf_status) {
2011 case FSF_PORT_HANDLE_NOT_VALID:
2012 zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fscuh_1", req);
2013 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2014 break;
2015 case FSF_LUN_HANDLE_NOT_VALID:
2016 zfcp_erp_port_reopen(unit->port, 0, "fscuh_2", req);
2017 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2018 break;
2019 case FSF_PORT_BOXED:
2020 zfcp_erp_port_boxed(unit->port, "fscuh_3", req);
2021 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
2022 ZFCP_STATUS_FSFREQ_RETRY;
2023 break;
2024 case FSF_ADAPTER_STATUS_AVAILABLE:
2025 switch (req->qtcb->header.fsf_status_qual.word[0]) {
2026 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
2027 zfcp_fc_test_link(unit->port);
2028 /* fall through */
2029 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
2030 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2031 break;
2032 }
2033 break;
2034 case FSF_GOOD:
2035 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
2036 break;
2037 }
2038 }
2039
2040 /**
2041 * zfcp_fsf_close_unit - close zfcp unit
2042  * @erp_action: pointer to struct zfcp_erp_action
2043 * Returns: 0 on success, error otherwise
2044 */
2045 int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
2046 {
2047 struct qdio_buffer_element *sbale;
2048 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
2049 struct zfcp_fsf_req *req;
2050 int retval = -EIO;
2051
2052 spin_lock_bh(&qdio->req_q_lock);
2053 if (zfcp_fsf_req_sbal_get(qdio))
2054 goto out;
2055
2056 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
2057 qdio->adapter->pool.erp_req);
2058
2059 if (IS_ERR(req)) {
2060 retval = PTR_ERR(req);
2061 goto out;
2062 }
2063
2064 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2065 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
2066 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
2067 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2068
2069 req->qtcb->header.port_handle = erp_action->port->handle;
2070 req->qtcb->header.lun_handle = erp_action->unit->handle;
2071 req->handler = zfcp_fsf_close_unit_handler;
2072 req->data = erp_action->unit;
2073 req->erp_action = erp_action;
2074 erp_action->fsf_req = req;
2075
2076 zfcp_fsf_start_erp_timer(req);
2077 retval = zfcp_fsf_req_send(req);
2078 if (retval) {
2079 zfcp_fsf_req_free(req);
2080 erp_action->fsf_req = NULL;
2081 }
2082 out:
2083 spin_unlock_bh(&qdio->req_q_lock);
2084 return retval;
2085 }
2086
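/* Fold one latency sample into the running sum/min/max counters. */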
2087 static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
2088 {
2089 lat_rec->sum += lat;
2090 lat_rec->min = min(lat_rec->min, lat);
2091 lat_rec->max = max(lat_rec->max, lat);
2092 }
2093
2094 static void zfcp_fsf_req_latency(struct zfcp_fsf_req *req)
2095 {
2096 struct fsf_qual_latency_info *lat_inf;
2097 struct latency_cont *lat;
2098 struct zfcp_unit *unit = req->unit;
2099
2100 lat_inf = &req->qtcb->prefix.prot_status_qual.latency_info;
2101
2102 switch (req->qtcb->bottom.io.data_direction) {
2103 case FSF_DATADIR_READ:
2104 lat = &unit->latencies.read;
2105 break;
2106 case FSF_DATADIR_WRITE:
2107 lat = &unit->latencies.write;
2108 break;
2109 case FSF_DATADIR_CMND:
2110 lat = &unit->latencies.cmd;
2111 break;
2112 default:
2113 return;
2114 }
2115
2116 spin_lock(&unit->latencies.lock);
2117 zfcp_fsf_update_lat(&lat->channel, lat_inf->channel_lat);
2118 zfcp_fsf_update_lat(&lat->fabric, lat_inf->fabric_lat);
2119 lat->counter++;
2120 spin_unlock(&unit->latencies.lock);
2121 }
2122
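/* When blktrace is built in, attach the measured channel/fabric latencies
 * and queue utilization to the block layer request as driver data. */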
2123 #ifdef CONFIG_BLK_DEV_IO_TRACE
2124 static void zfcp_fsf_trace_latency(struct zfcp_fsf_req *fsf_req)
2125 {
2126 struct fsf_qual_latency_info *lat_inf;
2127 struct scsi_cmnd *scsi_cmnd = (struct scsi_cmnd *)fsf_req->data;
2128 struct request *req = scsi_cmnd->request;
2129 struct zfcp_blk_drv_data trace;
2130 int ticks = fsf_req->adapter->timer_ticks;
2131
2132 trace.flags = 0;
2133 trace.magic = ZFCP_BLK_DRV_DATA_MAGIC;
2134 if (fsf_req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) {
2135 trace.flags |= ZFCP_BLK_LAT_VALID;
2136 lat_inf = &fsf_req->qtcb->prefix.prot_status_qual.latency_info;
2137 trace.channel_lat = lat_inf->channel_lat * ticks;
2138 trace.fabric_lat = lat_inf->fabric_lat * ticks;
2139 }
2140 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
2141 trace.flags |= ZFCP_BLK_REQ_ERROR;
2142 trace.inb_usage = fsf_req->queue_req.qdio_inb_usage;
2143 trace.outb_usage = fsf_req->queue_req.qdio_outb_usage;
2144
2145 blk_add_driver_data(req->q, req, &trace, sizeof(trace));
2146 }
2147 #else
2148 static inline void zfcp_fsf_trace_latency(struct zfcp_fsf_req *fsf_req)
2149 {
2150 }
2151 #endif
2152
2153 static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
2154 {
2155 struct scsi_cmnd *scpnt;
2156 struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
2157 &(req->qtcb->bottom.io.fcp_rsp);
2158 u32 sns_len;
2159 	char *fcp_rsp_info = (char *) &fcp_rsp_iu[1];
2160 unsigned long flags;
2161
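/* Hold the adapter's abort_lock so that completion cannot race with a SCSI
 * command abort; see the comment before the unlock at the end of this
 * function. */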
2162 read_lock_irqsave(&req->adapter->abort_lock, flags);
2163
2164 scpnt = req->data;
2165 if (unlikely(!scpnt)) {
2166 read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2167 return;
2168 }
2169
2170 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ABORTED)) {
2171 set_host_byte(scpnt, DID_SOFT_ERROR);
2172 goto skip_fsfstatus;
2173 }
2174
2175 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
2176 set_host_byte(scpnt, DID_ERROR);
2177 goto skip_fsfstatus;
2178 }
2179
2180 set_msg_byte(scpnt, COMMAND_COMPLETE);
2181
2182 scpnt->result |= fcp_rsp_iu->scsi_status;
2183
2184 if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA)
2185 zfcp_fsf_req_latency(req);
2186
2187 zfcp_fsf_trace_latency(req);
2188
2189 if (unlikely(fcp_rsp_iu->validity.bits.fcp_rsp_len_valid)) {
2190 if (fcp_rsp_info[3] == RSP_CODE_GOOD)
2191 set_host_byte(scpnt, DID_OK);
2192 else {
2193 set_host_byte(scpnt, DID_ERROR);
2194 goto skip_fsfstatus;
2195 }
2196 }
2197
2198 if (unlikely(fcp_rsp_iu->validity.bits.fcp_sns_len_valid)) {
2199 sns_len = FSF_FCP_RSP_SIZE - sizeof(struct fcp_rsp_iu) +
2200 fcp_rsp_iu->fcp_rsp_len;
2201 sns_len = min(sns_len, (u32) SCSI_SENSE_BUFFERSIZE);
2202 sns_len = min(sns_len, fcp_rsp_iu->fcp_sns_len);
2203
2204 memcpy(scpnt->sense_buffer,
2205 zfcp_get_fcp_sns_info_ptr(fcp_rsp_iu), sns_len);
2206 }
2207
2208 if (unlikely(fcp_rsp_iu->validity.bits.fcp_resid_under)) {
2209 scsi_set_resid(scpnt, fcp_rsp_iu->fcp_resid);
2210 if (scsi_bufflen(scpnt) - scsi_get_resid(scpnt) <
2211 scpnt->underflow)
2212 set_host_byte(scpnt, DID_ERROR);
2213 }
2214 skip_fsfstatus:
2215 if (scpnt->result != 0)
2216 zfcp_dbf_scsi_result("erro", 3, req->adapter->dbf, scpnt, req);
2217 else if (scpnt->retries > 0)
2218 zfcp_dbf_scsi_result("retr", 4, req->adapter->dbf, scpnt, req);
2219 else
2220 zfcp_dbf_scsi_result("norm", 6, req->adapter->dbf, scpnt, req);
2221
2222 scpnt->host_scribble = NULL;
2223 (scpnt->scsi_done) (scpnt);
2224 /*
2225 * We must hold this lock until scsi_done has been called.
2226 * Otherwise we may call scsi_done after abort regarding this
2227 * command has completed.
2228 * Note: scsi_done must not block!
2229 */
2230 read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2231 }
2232
2233 static void zfcp_fsf_send_fcp_ctm_handler(struct zfcp_fsf_req *req)
2234 {
2235 struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
2236 &(req->qtcb->bottom.io.fcp_rsp);
2237 	char *fcp_rsp_info = (char *) &fcp_rsp_iu[1];
2238
2239 if ((fcp_rsp_info[3] != RSP_CODE_GOOD) ||
2240 (req->status & ZFCP_STATUS_FSFREQ_ERROR))
2241 req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
2242 }
2243
2244
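/* Completion handler shared by SCSI commands and task management requests. */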
2245 static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req)
2246 {
2247 struct zfcp_unit *unit;
2248 struct fsf_qtcb_header *header = &req->qtcb->header;
2249
2250 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT))
2251 unit = req->data;
2252 else
2253 unit = req->unit;
2254
2255 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
2256 goto skip_fsfstatus;
2257
2258 switch (header->fsf_status) {
2259 case FSF_HANDLE_MISMATCH:
2260 case FSF_PORT_HANDLE_NOT_VALID:
2261 zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fssfch1", req);
2262 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2263 break;
2264 case FSF_FCPLUN_NOT_VALID:
2265 case FSF_LUN_HANDLE_NOT_VALID:
2266 zfcp_erp_port_reopen(unit->port, 0, "fssfch2", req);
2267 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2268 break;
2269 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
2270 zfcp_fsf_class_not_supp(req);
2271 break;
2272 case FSF_ACCESS_DENIED:
2273 zfcp_fsf_access_denied_unit(req, unit);
2274 break;
2275 case FSF_DIRECTION_INDICATOR_NOT_VALID:
2276 dev_err(&req->adapter->ccw_device->dev,
2277 "Incorrect direction %d, unit 0x%016Lx on port "
2278 "0x%016Lx closed\n",
2279 req->qtcb->bottom.io.data_direction,
2280 (unsigned long long)unit->fcp_lun,
2281 (unsigned long long)unit->port->wwpn);
2282 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, "fssfch3",
2283 req);
2284 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2285 break;
2286 case FSF_CMND_LENGTH_NOT_VALID:
2287 dev_err(&req->adapter->ccw_device->dev,
2288 "Incorrect CDB length %d, unit 0x%016Lx on "
2289 "port 0x%016Lx closed\n",
2290 req->qtcb->bottom.io.fcp_cmnd_length,
2291 (unsigned long long)unit->fcp_lun,
2292 (unsigned long long)unit->port->wwpn);
2293 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, "fssfch4",
2294 req);
2295 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2296 break;
2297 case FSF_PORT_BOXED:
2298 zfcp_erp_port_boxed(unit->port, "fssfch5", req);
2299 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
2300 ZFCP_STATUS_FSFREQ_RETRY;
2301 break;
2302 case FSF_LUN_BOXED:
2303 zfcp_erp_unit_boxed(unit, "fssfch6", req);
2304 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
2305 ZFCP_STATUS_FSFREQ_RETRY;
2306 break;
2307 case FSF_ADAPTER_STATUS_AVAILABLE:
2308 if (header->fsf_status_qual.word[0] ==
2309 FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
2310 zfcp_fc_test_link(unit->port);
2311 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2312 break;
2313 }
2314 skip_fsfstatus:
2315 if (req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
2316 zfcp_fsf_send_fcp_ctm_handler(req);
2317 else {
2318 zfcp_fsf_send_fcp_command_task_handler(req);
2319 req->unit = NULL;
2320 zfcp_unit_put(unit);
2321 }
2322 }
2323
2324 static void zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, u32 fcp_dl)
2325 {
2326 u32 *fcp_dl_ptr;
2327
2328 /*
2329 * fcp_dl_addr = start address of fcp_cmnd structure +
2330 	 * size of fixed part + size of dynamically sized add_fcp_cdb field
2331 * SEE FCP-2 documentation
2332 */
2333 fcp_dl_ptr = (u32 *) ((unsigned char *) &fcp_cmd[1] +
2334 (fcp_cmd->add_fcp_cdb_length << 2));
2335 *fcp_dl_ptr = fcp_dl;
2336 }
2337
2338 /**
2339 * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command)
2340 * @unit: unit where command is sent to
2341 * @scsi_cmnd: scsi command to be sent
2342 */
2343 int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2344 struct scsi_cmnd *scsi_cmnd)
2345 {
2346 struct zfcp_fsf_req *req;
2347 struct fcp_cmnd_iu *fcp_cmnd_iu;
2348 unsigned int sbtype = SBAL_FLAGS0_TYPE_READ;
2349 int real_bytes, retval = -EIO;
2350 struct zfcp_adapter *adapter = unit->port->adapter;
2351 struct zfcp_qdio *qdio = adapter->qdio;
2352
2353 if (unlikely(!(atomic_read(&unit->status) &
2354 ZFCP_STATUS_COMMON_UNBLOCKED)))
2355 return -EBUSY;
2356
2357 spin_lock(&qdio->req_q_lock);
2358 if (atomic_read(&qdio->req_q.count) <= 0) {
2359 atomic_inc(&qdio->req_q_full);
2360 goto out;
2361 }
2362
2363 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2364 adapter->pool.scsi_req);
2365
2366 if (IS_ERR(req)) {
2367 retval = PTR_ERR(req);
2368 goto out;
2369 }
2370
2371 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2372 zfcp_unit_get(unit);
2373 req->unit = unit;
2374 req->data = scsi_cmnd;
2375 req->handler = zfcp_fsf_send_fcp_command_handler;
2376 req->qtcb->header.lun_handle = unit->handle;
2377 req->qtcb->header.port_handle = unit->port->handle;
2378 req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2379
2380 scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
2381
2382 fcp_cmnd_iu = (struct fcp_cmnd_iu *) &(req->qtcb->bottom.io.fcp_cmnd);
2383 fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
2384 /*
2385 * set depending on data direction:
2386 * data direction bits in SBALE (SB Type)
2387 * data direction bits in QTCB
2388 * data direction bits in FCP_CMND IU
2389 */
2390 switch (scsi_cmnd->sc_data_direction) {
2391 case DMA_NONE:
2392 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2393 break;
2394 case DMA_FROM_DEVICE:
2395 req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ;
2396 fcp_cmnd_iu->rddata = 1;
2397 break;
2398 case DMA_TO_DEVICE:
2399 req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE;
2400 sbtype = SBAL_FLAGS0_TYPE_WRITE;
2401 fcp_cmnd_iu->wddata = 1;
2402 break;
2403 case DMA_BIDIRECTIONAL:
2404 goto failed_scsi_cmnd;
2405 }
2406
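/* Use the SIMPLE_Q task attribute if the device does simple tagging or the
 * unit is shared read-only; otherwise send the command untagged. */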
2407 if (likely((scsi_cmnd->device->simple_tags) ||
2408 ((atomic_read(&unit->status) & ZFCP_STATUS_UNIT_READONLY) &&
2409 (atomic_read(&unit->status) & ZFCP_STATUS_UNIT_SHARED))))
2410 fcp_cmnd_iu->task_attribute = SIMPLE_Q;
2411 else
2412 fcp_cmnd_iu->task_attribute = UNTAGGED;
2413
2414 if (unlikely(scsi_cmnd->cmd_len > FCP_CDB_LENGTH))
2415 fcp_cmnd_iu->add_fcp_cdb_length =
2416 (scsi_cmnd->cmd_len - FCP_CDB_LENGTH) >> 2;
2417
2418 memcpy(fcp_cmnd_iu->fcp_cdb, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
2419
2420 req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) +
2421 fcp_cmnd_iu->add_fcp_cdb_length + sizeof(u32);
2422
2423 real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req, sbtype,
2424 scsi_sglist(scsi_cmnd),
2425 FSF_MAX_SBALS_PER_REQ);
2426 if (unlikely(real_bytes < 0)) {
2427 if (req->queue_req.sbal_number >= FSF_MAX_SBALS_PER_REQ) {
2428 dev_err(&adapter->ccw_device->dev,
2429 "Oversize data package, unit 0x%016Lx "
2430 "on port 0x%016Lx closed\n",
2431 (unsigned long long)unit->fcp_lun,
2432 (unsigned long long)unit->port->wwpn);
2433 zfcp_erp_unit_shutdown(unit, 0, "fssfct1", req);
2434 retval = -EINVAL;
2435 }
2436 goto failed_scsi_cmnd;
2437 }
2438
2439 zfcp_set_fcp_dl(fcp_cmnd_iu, real_bytes);
2440
2441 retval = zfcp_fsf_req_send(req);
2442 if (unlikely(retval))
2443 goto failed_scsi_cmnd;
2444
2445 goto out;
2446
2447 failed_scsi_cmnd:
2448 zfcp_unit_put(unit);
2449 zfcp_fsf_req_free(req);
2450 scsi_cmnd->host_scribble = NULL;
2451 out:
2452 spin_unlock(&qdio->req_q_lock);
2453 return retval;
2454 }
2455
2456 /**
2457 * zfcp_fsf_send_fcp_ctm - send SCSI task management command
2458 * @unit: pointer to struct zfcp_unit
2459 * @tm_flags: unsigned byte for task management flags
2460  * Returns: on success pointer to struct zfcp_fsf_req, NULL otherwise
2461 */
2462 struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
2463 {
2464 struct qdio_buffer_element *sbale;
2465 struct zfcp_fsf_req *req = NULL;
2466 struct fcp_cmnd_iu *fcp_cmnd_iu;
2467 struct zfcp_qdio *qdio = unit->port->adapter->qdio;
2468
2469 if (unlikely(!(atomic_read(&unit->status) &
2470 ZFCP_STATUS_COMMON_UNBLOCKED)))
2471 return NULL;
2472
2473 spin_lock_bh(&qdio->req_q_lock);
2474 if (zfcp_fsf_req_sbal_get(qdio))
2475 goto out;
2476
2477 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2478 qdio->adapter->pool.scsi_req);
2479
2480 if (IS_ERR(req)) {
2481 req = NULL;
2482 goto out;
2483 }
2484
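/* Flag the request as task management so that the completion handler invokes
 * zfcp_fsf_send_fcp_ctm_handler instead of the SCSI command path. */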
2485 req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT;
2486 req->data = unit;
2487 req->handler = zfcp_fsf_send_fcp_command_handler;
2488 req->qtcb->header.lun_handle = unit->handle;
2489 req->qtcb->header.port_handle = unit->port->handle;
2490 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2491 req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2492 req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) +
2493 sizeof(u32);
2494
2495 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
2496 sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
2497 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2498
2499 fcp_cmnd_iu = (struct fcp_cmnd_iu *) &req->qtcb->bottom.io.fcp_cmnd;
2500 fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
2501 fcp_cmnd_iu->task_management_flags = tm_flags;
2502
2503 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
2504 if (!zfcp_fsf_req_send(req))
2505 goto out;
2506
2507 zfcp_fsf_req_free(req);
2508 req = NULL;
2509 out:
2510 spin_unlock_bh(&qdio->req_q_lock);
2511 return req;
2512 }
2513
2514 static void zfcp_fsf_control_file_handler(struct zfcp_fsf_req *req)
2515 {
2516 }
2517
2518 /**
2519 * zfcp_fsf_control_file - control file upload/download
2520 * @adapter: pointer to struct zfcp_adapter
2521 * @fsf_cfdc: pointer to struct zfcp_fsf_cfdc
2522 * Returns: on success pointer to struct zfcp_fsf_req, NULL otherwise
2523 */
2524 struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2525 struct zfcp_fsf_cfdc *fsf_cfdc)
2526 {
2527 struct qdio_buffer_element *sbale;
2528 struct zfcp_qdio *qdio = adapter->qdio;
2529 struct zfcp_fsf_req *req = NULL;
2530 struct fsf_qtcb_bottom_support *bottom;
2531 int direction, retval = -EIO, bytes;
2532
2533 if (!(adapter->adapter_features & FSF_FEATURE_CFDC))
2534 return ERR_PTR(-EOPNOTSUPP);
2535
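/* Derive the QDIO data transfer direction from the CFDC command. */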
2536 switch (fsf_cfdc->command) {
2537 case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
2538 direction = SBAL_FLAGS0_TYPE_WRITE;
2539 break;
2540 case FSF_QTCB_UPLOAD_CONTROL_FILE:
2541 direction = SBAL_FLAGS0_TYPE_READ;
2542 break;
2543 default:
2544 return ERR_PTR(-EINVAL);
2545 }
2546
2547 spin_lock_bh(&qdio->req_q_lock);
2548 if (zfcp_fsf_req_sbal_get(qdio))
2549 goto out;
2550
2551 req = zfcp_fsf_req_create(qdio, fsf_cfdc->command, NULL);
2552 if (IS_ERR(req)) {
2553 retval = -EPERM;
2554 goto out;
2555 }
2556
2557 req->handler = zfcp_fsf_control_file_handler;
2558
2559 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
2560 sbale[0].flags |= direction;
2561
2562 bottom = &req->qtcb->bottom.support;
2563 bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
2564 bottom->option = fsf_cfdc->option;
2565
2566 bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req,
2567 direction, fsf_cfdc->sg,
2568 FSF_MAX_SBALS_PER_REQ);
2569 if (bytes != ZFCP_CFDC_MAX_SIZE) {
2570 zfcp_fsf_req_free(req);
2571 goto out;
2572 }
2573
2574 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
2575 retval = zfcp_fsf_req_send(req);
2576 out:
2577 spin_unlock_bh(&qdio->req_q_lock);
2578
2579 if (!retval) {
2580 wait_for_completion(&req->completion);
2581 return req;
2582 }
2583 return ERR_PTR(retval);
2584 }
2585
2586 /**
2587 * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO
2588  * @qdio: pointer to struct zfcp_qdio
2589 * @sbal_idx: response queue index of SBAL to be processed
2590 */
2591 void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
2592 {
2593 struct zfcp_adapter *adapter = qdio->adapter;
2594 struct qdio_buffer *sbal = qdio->resp_q.sbal[sbal_idx];
2595 struct qdio_buffer_element *sbale;
2596 struct zfcp_fsf_req *fsf_req;
2597 unsigned long flags, req_id;
2598 int idx;
2599
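/* Each used SBALE carries the request id of a completed FSF request; look it
 * up in the adapter's request list, remove it, and complete it. */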
2600 for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
2601
2602 sbale = &sbal->element[idx];
2603 req_id = (unsigned long) sbale->addr;
2604 spin_lock_irqsave(&adapter->req_list_lock, flags);
2605 fsf_req = zfcp_reqlist_find(adapter, req_id);
2606
2607 if (!fsf_req)
2608 /*
2609 * Unknown request means that we have potentially memory
2610 * corruption and must stop the machine immediately.
2611 */
2612 panic("error: unknown req_id (%lx) on adapter %s.\n",
2613 req_id, dev_name(&adapter->ccw_device->dev));
2614
2615 list_del(&fsf_req->list);
2616 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
2617
2618 fsf_req->queue_req.sbal_response = sbal_idx;
2619 fsf_req->queue_req.qdio_inb_usage =
2620 atomic_read(&qdio->resp_q.count);
2621 zfcp_fsf_req_complete(fsf_req);
2622
2623 if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY))
2624 break;
2625 }
2626 }