drivers/scsi/libfc/fc_lport.c
1 /*
2 * Copyright(c) 2007 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20 /*
21 * PORT LOCKING NOTES
22 *
23 * These comments only apply to the 'port code' which consists of the lport,
24 * disc and rport blocks.
25 *
26 * MOTIVATION
27 *
28 * The lport, disc and rport blocks all have mutexes that are used to protect
29 * those objects. The main motivation for these locks is to prevent an
30 * lport reset from occurring just before we send a frame. In that scenario the
31 * lport's FID would get set to zero and then we'd send a frame with an
32 * invalid SID. We also need to ensure that states don't change unexpectedly
33 * while processing another state.
34 *
35 * HIERARCHY
36 *
37 * The following hierarchy defines the locking rules. A greater lock
38 * may be held before acquiring a lesser lock, but a lesser lock should never
39 * be held while attempting to acquire a greater lock. Here is the hierarchy-
40 *
41 * lport > disc, lport > rport, disc > rport
42 *
43 * CALLBACKS
44 *
45 * The callbacks cause complications with this scheme. There is a callback
46 * from the rport (to either lport or disc) and a callback from disc
47 * (to the lport).
48 *
49 * As rports exit the rport state machine a callback is made to the owner of
50 * the rport to notify success or failure. Since the callback is likely to
51 * cause the lport or disc to grab its lock we cannot hold the rport lock
52 * while making the callback. To ensure that the rport is not free'd while
53 * processing the callback the rport callbacks are serialized through a
54 * single-threaded workqueue. An rport would never be free'd while in a
55 * callback handler because no other rport work in this queue can be executed
56 * at the same time.
57 *
58 * When discovery succeeds or fails a callback is made to the lport as
59 * notification. Currently, successful discovery causes the lport to take no
60 * action. A failure will cause the lport to reset. There is likely a circular
61 * locking problem with this implementation.
62 */
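/*
 * A minimal sketch of the hierarchy rule above (illustration only, not
 * part of the original source): code that needs both the lport and disc
 * mutexes takes them in lport-then-disc order,
 *
 *	mutex_lock(&lport->lp_mutex);
 *	mutex_lock(&lport->disc.disc_mutex);
 *	...
 *	mutex_unlock(&lport->disc.disc_mutex);
 *	mutex_unlock(&lport->lp_mutex);
 *
 * and never acquires lp_mutex while already holding disc_mutex, nor
 * either of them while holding an rport mutex.
 */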
63
64 /*
65 * LPORT LOCKING
66 *
67 * The critical sections protected by the lport's mutex are quite broad and
68 * may be improved upon in the future. The lport code and its locking don't
69 * influence the I/O path, so excessive locking doesn't penalize I/O
70 * performance.
71 *
72 * The strategy is to lock whenever processing a request or response. Note
73 * that every _enter_* function corresponds to a state change. They generally
74 * change the lport's state and then send a request out on the wire. We lock
75 * before calling any of these functions to protect that state change. This
76 * means that the entry points into the lport block manage the locks while
77 * the state machine transitions between states (i.e. _enter_* functions),
78 * always staying protected.
79 *
80 * When handling responses we also hold the lport mutex broadly. When the
81 * lport receives the response frame it locks the mutex and then calls the
82 * appropriate handler for the particular response. Generally a response will
83 * trigger a state change and so the lock must already be held.
84 *
85 * Retries also have to consider the locking. The retries occur from a work
86 * context and the work function will lock the lport and then retry the state
87 * (i.e. _enter_* function).
88 */
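/*
 * A minimal sketch of the pattern described above (illustration only, not
 * part of the original source): entry points take lp_mutex and the
 * _enter_* helpers assume it is already held,
 *
 *	mutex_lock(&lport->lp_mutex);
 *	fc_lport_enter_flogi(lport);
 *	mutex_unlock(&lport->lp_mutex);
 *
 * as in fc_lport_reset(), __fc_linkup() and the retry work function
 * fc_lport_timeout() below.
 */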
89
90 #include <linux/timer.h>
91 #include <linux/delay.h>
92 #include <linux/module.h>
93 #include <linux/slab.h>
94 #include <asm/unaligned.h>
95
96 #include <scsi/fc/fc_gs.h>
97
98 #include <scsi/libfc.h>
99 #include <scsi/fc_encode.h>
100 #include <linux/scatterlist.h>
101
102 #include "fc_libfc.h"
103
104 /* Fabric IDs to use for point-to-point mode, chosen on whims. */
105 #define FC_LOCAL_PTP_FID_LO 0x010101
106 #define FC_LOCAL_PTP_FID_HI 0x010102
107
108 #define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds)*/
109
110 static void fc_lport_error(struct fc_lport *, struct fc_frame *);
111
112 static void fc_lport_enter_reset(struct fc_lport *);
113 static void fc_lport_enter_flogi(struct fc_lport *);
114 static void fc_lport_enter_dns(struct fc_lport *);
115 static void fc_lport_enter_ns(struct fc_lport *, enum fc_lport_state);
116 static void fc_lport_enter_scr(struct fc_lport *);
117 static void fc_lport_enter_ready(struct fc_lport *);
118 static void fc_lport_enter_logo(struct fc_lport *);
119 static void fc_lport_enter_fdmi(struct fc_lport *lport);
120 static void fc_lport_enter_ms(struct fc_lport *, enum fc_lport_state);
121
122 static const char *fc_lport_state_names[] = {
123 [LPORT_ST_DISABLED] = "disabled",
124 [LPORT_ST_FLOGI] = "FLOGI",
125 [LPORT_ST_DNS] = "dNS",
126 [LPORT_ST_RNN_ID] = "RNN_ID",
127 [LPORT_ST_RSNN_NN] = "RSNN_NN",
128 [LPORT_ST_RSPN_ID] = "RSPN_ID",
129 [LPORT_ST_RFT_ID] = "RFT_ID",
130 [LPORT_ST_RFF_ID] = "RFF_ID",
131 [LPORT_ST_FDMI] = "FDMI",
132 [LPORT_ST_RHBA] = "RHBA",
133 [LPORT_ST_RPA] = "RPA",
134 [LPORT_ST_DHBA] = "DHBA",
135 [LPORT_ST_DPRT] = "DPRT",
136 [LPORT_ST_SCR] = "SCR",
137 [LPORT_ST_READY] = "Ready",
138 [LPORT_ST_LOGO] = "LOGO",
139 [LPORT_ST_RESET] = "reset",
140 };
141
142 /**
143 * struct fc_bsg_info - FC Passthrough management structure
144 * @job: The passthrough job
145 * @lport: The local port to pass through a command
146 * @rsp_code: The expected response code
147 * @sg: job->reply_payload.sg_list
148 * @nents: job->reply_payload.sg_cnt
149 * @offset: The offset into the response data
150 */
151 struct fc_bsg_info {
152 struct fc_bsg_job *job;
153 struct fc_lport *lport;
154 u16 rsp_code;
155 struct scatterlist *sg;
156 u32 nents;
157 size_t offset;
158 };
159
160 /**
161 * fc_frame_drop() - Dummy frame handler
162 * @lport: The local port the frame was received on
163 * @fp: The received frame
164 */
165 static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
166 {
167 fc_frame_free(fp);
168 return 0;
169 }
170
171 /**
172 * fc_lport_rport_callback() - Event handler for rport events
173 * @lport: The lport which is receiving the event
174 * @rdata: private remote port data
175 * @event: The event that occurred
176 *
177 * Locking Note: The rport lock should not be held when calling
178 * this function.
179 */
180 static void fc_lport_rport_callback(struct fc_lport *lport,
181 struct fc_rport_priv *rdata,
182 enum fc_rport_event event)
183 {
184 FC_LPORT_DBG(lport, "Received a %d event for port (%6.6x)\n", event,
185 rdata->ids.port_id);
186
187 mutex_lock(&lport->lp_mutex);
188 switch (event) {
189 case RPORT_EV_READY:
190 if (lport->state == LPORT_ST_DNS) {
191 lport->dns_rdata = rdata;
192 fc_lport_enter_ns(lport, LPORT_ST_RNN_ID);
193 } else if (lport->state == LPORT_ST_FDMI) {
194 lport->ms_rdata = rdata;
195 fc_lport_enter_ms(lport, LPORT_ST_DHBA);
196 } else {
197 FC_LPORT_DBG(lport, "Received a READY event "
198 "on port (%6.6x) for the directory "
199 "server, but the lport is not "
200 "in the DNS or FDMI state, it's in the "
201 "%d state", rdata->ids.port_id,
202 lport->state);
203 lport->tt.rport_logoff(rdata);
204 }
205 break;
206 case RPORT_EV_LOGO:
207 case RPORT_EV_FAILED:
208 case RPORT_EV_STOP:
209 if (rdata->ids.port_id == FC_FID_DIR_SERV)
210 lport->dns_rdata = NULL;
211 else if (rdata->ids.port_id == FC_FID_MGMT_SERV)
212 lport->ms_rdata = NULL;
213 break;
214 case RPORT_EV_NONE:
215 break;
216 }
217 mutex_unlock(&lport->lp_mutex);
218 }
219
220 /**
221 * fc_lport_state() - Return a string which represents the lport's state
222 * @lport: The lport whose state is to be converted to a string
223 */
224 static const char *fc_lport_state(struct fc_lport *lport)
225 {
226 const char *cp;
227
228 cp = fc_lport_state_names[lport->state];
229 if (!cp)
230 cp = "unknown";
231 return cp;
232 }
233
234 /**
235 * fc_lport_ptp_setup() - Create an rport for point-to-point mode
236 * @lport: The lport to attach the ptp rport to
237 * @remote_fid: The FID of the ptp rport
238 * @remote_wwpn: The WWPN of the ptp rport
239 * @remote_wwnn: The WWNN of the ptp rport
240 */
241 static void fc_lport_ptp_setup(struct fc_lport *lport,
242 u32 remote_fid, u64 remote_wwpn,
243 u64 remote_wwnn)
244 {
245 mutex_lock(&lport->disc.disc_mutex);
246 if (lport->ptp_rdata) {
247 lport->tt.rport_logoff(lport->ptp_rdata);
248 kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
249 }
250 lport->ptp_rdata = lport->tt.rport_create(lport, remote_fid);
251 kref_get(&lport->ptp_rdata->kref);
252 lport->ptp_rdata->ids.port_name = remote_wwpn;
253 lport->ptp_rdata->ids.node_name = remote_wwnn;
254 mutex_unlock(&lport->disc.disc_mutex);
255
256 lport->tt.rport_login(lport->ptp_rdata);
257
258 fc_lport_enter_ready(lport);
259 }
260
261 /**
262 * fc_get_host_port_state() - Return the port state of the given Scsi_Host
263 * @shost: The SCSI host whose port state is to be determined
264 */
265 void fc_get_host_port_state(struct Scsi_Host *shost)
266 {
267 struct fc_lport *lport = shost_priv(shost);
268
269 mutex_lock(&lport->lp_mutex);
270 if (!lport->link_up)
271 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
272 else
273 switch (lport->state) {
274 case LPORT_ST_READY:
275 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
276 break;
277 default:
278 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
279 }
280 mutex_unlock(&lport->lp_mutex);
281 }
282 EXPORT_SYMBOL(fc_get_host_port_state);
283
284 /**
285 * fc_get_host_speed() - Return the speed of the given Scsi_Host
286 * @shost: The SCSI host whose port speed is to be determined
287 */
288 void fc_get_host_speed(struct Scsi_Host *shost)
289 {
290 struct fc_lport *lport = shost_priv(shost);
291
292 fc_host_speed(shost) = lport->link_speed;
293 }
294 EXPORT_SYMBOL(fc_get_host_speed);
295
296 /**
297 * fc_get_host_stats() - Return the Scsi_Host's statistics
298 * @shost: The SCSI host whose statistics are to be returned
299 */
300 struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
301 {
302 struct fc_host_statistics *fc_stats;
303 struct fc_lport *lport = shost_priv(shost);
304 struct timespec v0, v1;
305 unsigned int cpu;
306 u64 fcp_in_bytes = 0;
307 u64 fcp_out_bytes = 0;
308
309 fc_stats = &lport->host_stats;
310 memset(fc_stats, 0, sizeof(struct fc_host_statistics));
311
312 jiffies_to_timespec(jiffies, &v0);
313 jiffies_to_timespec(lport->boot_time, &v1);
314 fc_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);
315
316 for_each_possible_cpu(cpu) {
317 struct fc_stats *stats;
318
319 stats = per_cpu_ptr(lport->stats, cpu);
320
321 fc_stats->tx_frames += stats->TxFrames;
322 fc_stats->tx_words += stats->TxWords;
323 fc_stats->rx_frames += stats->RxFrames;
324 fc_stats->rx_words += stats->RxWords;
325 fc_stats->error_frames += stats->ErrorFrames;
326 fc_stats->invalid_crc_count += stats->InvalidCRCCount;
327 fc_stats->fcp_input_requests += stats->InputRequests;
328 fc_stats->fcp_output_requests += stats->OutputRequests;
329 fc_stats->fcp_control_requests += stats->ControlRequests;
330 fcp_in_bytes += stats->InputBytes;
331 fcp_out_bytes += stats->OutputBytes;
332 fc_stats->fcp_packet_alloc_failures += stats->FcpPktAllocFails;
333 fc_stats->fcp_packet_aborts += stats->FcpPktAborts;
334 fc_stats->fcp_frame_alloc_failures += stats->FcpFrameAllocFails;
335 fc_stats->link_failure_count += stats->LinkFailureCount;
336 }
337 fc_stats->fcp_input_megabytes = div_u64(fcp_in_bytes, 1000000);
338 fc_stats->fcp_output_megabytes = div_u64(fcp_out_bytes, 1000000);
339 fc_stats->lip_count = -1;
340 fc_stats->nos_count = -1;
341 fc_stats->loss_of_sync_count = -1;
342 fc_stats->loss_of_signal_count = -1;
343 fc_stats->prim_seq_protocol_err_count = -1;
344 fc_stats->dumped_frames = -1;
345
346 /* update exches stats */
347 fc_exch_update_stats(lport);
348
349 return fc_stats;
350 }
351 EXPORT_SYMBOL(fc_get_host_stats);
352
353 /**
354 * fc_lport_flogi_fill() - Fill in FLOGI command for request
355 * @lport: The local port the FLOGI is for
356 * @flogi: The FLOGI command
357 * @op: The opcode
358 */
359 static void fc_lport_flogi_fill(struct fc_lport *lport,
360 struct fc_els_flogi *flogi,
361 unsigned int op)
362 {
363 struct fc_els_csp *sp;
364 struct fc_els_cssp *cp;
365
366 memset(flogi, 0, sizeof(*flogi));
367 flogi->fl_cmd = (u8) op;
368 put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
369 put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
370 sp = &flogi->fl_csp;
371 sp->sp_hi_ver = 0x20;
372 sp->sp_lo_ver = 0x20;
373 sp->sp_bb_cred = htons(10); /* this gets set by gateway */
374 sp->sp_bb_data = htons((u16) lport->mfs);
375 cp = &flogi->fl_cssp[3 - 1]; /* class 3 parameters */
376 cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
377 if (op != ELS_FLOGI) {
378 sp->sp_features = htons(FC_SP_FT_CIRO);
379 sp->sp_tot_seq = htons(255); /* seq. we accept */
380 sp->sp_rel_off = htons(0x1f);
381 sp->sp_e_d_tov = htonl(lport->e_d_tov);
382
383 cp->cp_rdfs = htons((u16) lport->mfs);
384 cp->cp_con_seq = htons(255);
385 cp->cp_open_seq = 1;
386 }
387 }
388
389 /**
390 * fc_lport_add_fc4_type() - Add a supported FC-4 type to a local port
391 * @lport: The local port to add a new FC-4 type to
392 * @type: The new FC-4 type
393 */
394 static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
395 {
396 __be32 *mp;
397
398 mp = &lport->fcts.ff_type_map[type / FC_NS_BPW];
399 *mp = htonl(ntohl(*mp) | 1UL << (type % FC_NS_BPW));
400 }
401
402 /**
403 * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report.
404 * @lport: Fibre Channel local port receiving the RLIR
405 * @fp: The RLIR request frame
406 *
407 * Locking Note: The lport lock is expected to be held before calling
408 * this function.
409 */
410 static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp)
411 {
412 FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
413 fc_lport_state(lport));
414
415 lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
416 fc_frame_free(fp);
417 }
418
419 /**
420 * fc_lport_recv_echo_req() - Handle received ECHO request
421 * @lport: The local port receiving the ECHO
422 * @in_fp: ECHO request frame
423 *
424 * Locking Note: The lport lock is expected to be held before calling
425 * this function.
426 */
427 static void fc_lport_recv_echo_req(struct fc_lport *lport,
428 struct fc_frame *in_fp)
429 {
430 struct fc_frame *fp;
431 unsigned int len;
432 void *pp;
433 void *dp;
434
435 FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
436 fc_lport_state(lport));
437
438 len = fr_len(in_fp) - sizeof(struct fc_frame_header);
439 pp = fc_frame_payload_get(in_fp, len);
440
441 if (len < sizeof(__be32))
442 len = sizeof(__be32);
443
444 fp = fc_frame_alloc(lport, len);
445 if (fp) {
446 dp = fc_frame_payload_get(fp, len);
447 memcpy(dp, pp, len);
448 *((__be32 *)dp) = htonl(ELS_LS_ACC << 24);
449 fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
450 lport->tt.frame_send(lport, fp);
451 }
452 fc_frame_free(in_fp);
453 }
454
455 /**
456 * fc_lport_recv_rnid_req() - Handle received Request Node ID data request
457 * @lport: The local port receiving the RNID
458 * @in_fp: The RNID request frame
459 *
460 * Locking Note: The lport lock is expected to be held before calling
461 * this function.
462 */
463 static void fc_lport_recv_rnid_req(struct fc_lport *lport,
464 struct fc_frame *in_fp)
465 {
466 struct fc_frame *fp;
467 struct fc_els_rnid *req;
468 struct {
469 struct fc_els_rnid_resp rnid;
470 struct fc_els_rnid_cid cid;
471 struct fc_els_rnid_gen gen;
472 } *rp;
473 struct fc_seq_els_data rjt_data;
474 u8 fmt;
475 size_t len;
476
477 FC_LPORT_DBG(lport, "Received RNID request while in state %s\n",
478 fc_lport_state(lport));
479
480 req = fc_frame_payload_get(in_fp, sizeof(*req));
481 if (!req) {
482 rjt_data.reason = ELS_RJT_LOGIC;
483 rjt_data.explan = ELS_EXPL_NONE;
484 lport->tt.seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
485 } else {
486 fmt = req->rnid_fmt;
487 len = sizeof(*rp);
488 if (fmt != ELS_RNIDF_GEN ||
489 ntohl(lport->rnid_gen.rnid_atype) == 0) {
490 fmt = ELS_RNIDF_NONE; /* nothing to provide */
491 len -= sizeof(rp->gen);
492 }
493 fp = fc_frame_alloc(lport, len);
494 if (fp) {
495 rp = fc_frame_payload_get(fp, len);
496 memset(rp, 0, len);
497 rp->rnid.rnid_cmd = ELS_LS_ACC;
498 rp->rnid.rnid_fmt = fmt;
499 rp->rnid.rnid_cid_len = sizeof(rp->cid);
500 rp->cid.rnid_wwpn = htonll(lport->wwpn);
501 rp->cid.rnid_wwnn = htonll(lport->wwnn);
502 if (fmt == ELS_RNIDF_GEN) {
503 rp->rnid.rnid_sid_len = sizeof(rp->gen);
504 memcpy(&rp->gen, &lport->rnid_gen,
505 sizeof(rp->gen));
506 }
507 fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
508 lport->tt.frame_send(lport, fp);
509 }
510 }
511 fc_frame_free(in_fp);
512 }
513
514 /**
515 * fc_lport_recv_logo_req() - Handle received fabric LOGO request
516 * @lport: The local port receiving the LOGO
517 * @fp: The LOGO request frame
518 *
519 * Locking Note: The lport lock is expected to be held before calling
520 * this function.
521 */
522 static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
523 {
524 lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
525 fc_lport_enter_reset(lport);
526 fc_frame_free(fp);
527 }
528
529 /**
530 * fc_fabric_login() - Start the lport state machine
531 * @lport: The local port that should log into the fabric
532 *
533 * Locking Note: This function should not be called
534 * with the lport lock held.
535 */
536 int fc_fabric_login(struct fc_lport *lport)
537 {
538 int rc = -1;
539
540 mutex_lock(&lport->lp_mutex);
541 if (lport->state == LPORT_ST_DISABLED ||
542 lport->state == LPORT_ST_LOGO) {
543 fc_lport_state_enter(lport, LPORT_ST_RESET);
544 fc_lport_enter_reset(lport);
545 rc = 0;
546 }
547 mutex_unlock(&lport->lp_mutex);
548
549 return rc;
550 }
551 EXPORT_SYMBOL(fc_fabric_login);
552
553 /**
554 * __fc_linkup() - Handler for transport linkup events
555 * @lport: The lport whose link is up
556 *
557 * Locking: must be called with the lp_mutex held
558 */
559 void __fc_linkup(struct fc_lport *lport)
560 {
561 if (!lport->link_up) {
562 lport->link_up = 1;
563
564 if (lport->state == LPORT_ST_RESET)
565 fc_lport_enter_flogi(lport);
566 }
567 }
568
569 /**
570 * fc_linkup() - Handler for transport linkup events
571 * @lport: The local port whose link is up
572 */
573 void fc_linkup(struct fc_lport *lport)
574 {
575 printk(KERN_INFO "host%d: libfc: Link up on port (%6.6x)\n",
576 lport->host->host_no, lport->port_id);
577
578 mutex_lock(&lport->lp_mutex);
579 __fc_linkup(lport);
580 mutex_unlock(&lport->lp_mutex);
581 }
582 EXPORT_SYMBOL(fc_linkup);
583
584 /**
585 * __fc_linkdown() - Handler for transport linkdown events
586 * @lport: The lport whose link is down
587 *
588 * Locking: must be called with the lp_mutex held
589 */
590 void __fc_linkdown(struct fc_lport *lport)
591 {
592 if (lport->link_up) {
593 lport->link_up = 0;
594 fc_lport_enter_reset(lport);
595 lport->tt.fcp_cleanup(lport);
596 }
597 }
598
599 /**
600 * fc_linkdown() - Handler for transport linkdown events
601 * @lport: The local port whose link is down
602 */
603 void fc_linkdown(struct fc_lport *lport)
604 {
605 printk(KERN_INFO "host%d: libfc: Link down on port (%6.6x)\n",
606 lport->host->host_no, lport->port_id);
607
608 mutex_lock(&lport->lp_mutex);
609 __fc_linkdown(lport);
610 mutex_unlock(&lport->lp_mutex);
611 }
612 EXPORT_SYMBOL(fc_linkdown);
613
614 /**
615 * fc_fabric_logoff() - Logout of the fabric
616 * @lport: The local port to logoff the fabric
617 *
618 * Return value:
619 * 0 for success, -1 for failure
620 */
621 int fc_fabric_logoff(struct fc_lport *lport)
622 {
623 lport->tt.disc_stop_final(lport);
624 mutex_lock(&lport->lp_mutex);
625 if (lport->dns_rdata)
626 lport->tt.rport_logoff(lport->dns_rdata);
627 mutex_unlock(&lport->lp_mutex);
628 lport->tt.rport_flush_queue();
629 mutex_lock(&lport->lp_mutex);
630 fc_lport_enter_logo(lport);
631 mutex_unlock(&lport->lp_mutex);
632 cancel_delayed_work_sync(&lport->retry_work);
633 return 0;
634 }
635 EXPORT_SYMBOL(fc_fabric_logoff);
636
637 /**
638 * fc_lport_destroy() - Unregister a fc_lport
639 * @lport: The local port to unregister
640 *
641 * Note:
642 * exit routine for fc_lport instance
643 * clean-up all the allocated memory
644 * and free up other system resources.
645 *
646 */
647 int fc_lport_destroy(struct fc_lport *lport)
648 {
649 mutex_lock(&lport->lp_mutex);
650 lport->state = LPORT_ST_DISABLED;
651 lport->link_up = 0;
652 lport->tt.frame_send = fc_frame_drop;
653 mutex_unlock(&lport->lp_mutex);
654
655 lport->tt.fcp_abort_io(lport);
656 lport->tt.disc_stop_final(lport);
657 lport->tt.exch_mgr_reset(lport, 0, 0);
658 cancel_delayed_work_sync(&lport->retry_work);
659 fc_fc4_del_lport(lport);
660 return 0;
661 }
662 EXPORT_SYMBOL(fc_lport_destroy);
663
664 /**
665 * fc_set_mfs() - Set the maximum frame size for a local port
666 * @lport: The local port to set the MFS for
667 * @mfs: The new MFS
668 */
669 int fc_set_mfs(struct fc_lport *lport, u32 mfs)
670 {
671 unsigned int old_mfs;
672 int rc = -EINVAL;
673
674 mutex_lock(&lport->lp_mutex);
675
676 old_mfs = lport->mfs;
677
678 if (mfs >= FC_MIN_MAX_FRAME) {
679 mfs &= ~3;
680 if (mfs > FC_MAX_FRAME)
681 mfs = FC_MAX_FRAME;
682 mfs -= sizeof(struct fc_frame_header);
683 lport->mfs = mfs;
684 rc = 0;
685 }
686
687 if (!rc && mfs < old_mfs)
688 fc_lport_enter_reset(lport);
689
690 mutex_unlock(&lport->lp_mutex);
691
692 return rc;
693 }
694 EXPORT_SYMBOL(fc_set_mfs);
695
696 /**
697 * fc_lport_disc_callback() - Callback for discovery events
698 * @lport: The local port receiving the event
699 * @event: The discovery event
700 */
701 static void fc_lport_disc_callback(struct fc_lport *lport,
702 enum fc_disc_event event)
703 {
704 switch (event) {
705 case DISC_EV_SUCCESS:
706 FC_LPORT_DBG(lport, "Discovery succeeded\n");
707 break;
708 case DISC_EV_FAILED:
709 printk(KERN_ERR "host%d: libfc: "
710 "Discovery failed for port (%6.6x)\n",
711 lport->host->host_no, lport->port_id);
712 mutex_lock(&lport->lp_mutex);
713 fc_lport_enter_reset(lport);
714 mutex_unlock(&lport->lp_mutex);
715 break;
716 case DISC_EV_NONE:
717 WARN_ON(1);
718 break;
719 }
720 }
721
722 /**
723 * fc_lport_enter_ready() - Enter the ready state and start discovery
724 * @lport: The local port that is ready
725 *
726 * Locking Note: The lport lock is expected to be held before calling
727 * this routine.
728 */
729 static void fc_lport_enter_ready(struct fc_lport *lport)
730 {
731 FC_LPORT_DBG(lport, "Entered READY from state %s\n",
732 fc_lport_state(lport));
733
734 fc_lport_state_enter(lport, LPORT_ST_READY);
735 if (lport->vport)
736 fc_vport_set_state(lport->vport, FC_VPORT_ACTIVE);
737 fc_vports_linkchange(lport);
738
739 if (!lport->ptp_rdata)
740 lport->tt.disc_start(fc_lport_disc_callback, lport);
741 }
742
743 /**
744 * fc_lport_set_port_id() - set the local port Port ID
745 * @lport: The local port which will have its Port ID set.
746 * @port_id: The new port ID.
747 * @fp: The frame containing the incoming request, or NULL.
748 *
749 * Locking Note: The lport lock is expected to be held before calling
750 * this function.
751 */
752 static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id,
753 struct fc_frame *fp)
754 {
755 if (port_id)
756 printk(KERN_INFO "host%d: Assigned Port ID %6.6x\n",
757 lport->host->host_no, port_id);
758
759 lport->port_id = port_id;
760
761 /* Update the fc_host */
762 fc_host_port_id(lport->host) = port_id;
763
764 if (lport->tt.lport_set_port_id)
765 lport->tt.lport_set_port_id(lport, port_id, fp);
766 }
767
768 /**
769 * fc_lport_set_local_id() - set the local port Port ID for point-to-multipoint
770 * @lport: The local port which will have its Port ID set.
771 * @port_id: The new port ID.
772 *
773 * Called by the lower-level driver when transport sets the local port_id.
774 * This is used in VN_port to VN_port mode for FCoE, and causes FLOGI and
775 * discovery to be skipped.
776 */
777 void fc_lport_set_local_id(struct fc_lport *lport, u32 port_id)
778 {
779 mutex_lock(&lport->lp_mutex);
780
781 fc_lport_set_port_id(lport, port_id, NULL);
782
783 switch (lport->state) {
784 case LPORT_ST_RESET:
785 case LPORT_ST_FLOGI:
786 if (port_id)
787 fc_lport_enter_ready(lport);
788 break;
789 default:
790 break;
791 }
792 mutex_unlock(&lport->lp_mutex);
793 }
794 EXPORT_SYMBOL(fc_lport_set_local_id);
795
796 /**
797 * fc_lport_recv_flogi_req() - Receive a FLOGI request
798 * @lport: The local port that received the request
799 * @rx_fp: The FLOGI frame
800 *
801 * A received FLOGI request indicates a point-to-point connection.
802 * Accept it with the common service parameters indicating our N port.
803 * Set up to do a PLOGI if we have the higher-number WWPN.
804 *
805 * Locking Note: The lport lock is expected to be held before calling
806 * this function.
807 */
808 static void fc_lport_recv_flogi_req(struct fc_lport *lport,
809 struct fc_frame *rx_fp)
810 {
811 struct fc_frame *fp;
812 struct fc_frame_header *fh;
813 struct fc_els_flogi *flp;
814 struct fc_els_flogi *new_flp;
815 u64 remote_wwpn;
816 u32 remote_fid;
817 u32 local_fid;
818
819 FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n",
820 fc_lport_state(lport));
821
822 remote_fid = fc_frame_sid(rx_fp);
823 flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
824 if (!flp)
825 goto out;
826 remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
827 if (remote_wwpn == lport->wwpn) {
828 printk(KERN_WARNING "host%d: libfc: Received FLOGI from port "
829 "with same WWPN %16.16llx\n",
830 lport->host->host_no, remote_wwpn);
831 goto out;
832 }
833 FC_LPORT_DBG(lport, "FLOGI from port WWPN %16.16llx\n", remote_wwpn);
834
835 /*
836 * XXX what is the right thing to do for FIDs?
837 * The originator might expect our S_ID to be 0xfffffe.
838 * But if so, both of us could end up with the same FID.
839 */
840 local_fid = FC_LOCAL_PTP_FID_LO;
841 if (remote_wwpn < lport->wwpn) {
842 local_fid = FC_LOCAL_PTP_FID_HI;
843 if (!remote_fid || remote_fid == local_fid)
844 remote_fid = FC_LOCAL_PTP_FID_LO;
845 } else if (!remote_fid) {
846 remote_fid = FC_LOCAL_PTP_FID_HI;
847 }
848
849 fc_lport_set_port_id(lport, local_fid, rx_fp);
850
851 fp = fc_frame_alloc(lport, sizeof(*flp));
852 if (fp) {
853 new_flp = fc_frame_payload_get(fp, sizeof(*flp));
854 fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI);
855 new_flp->fl_cmd = (u8) ELS_LS_ACC;
856
857 /*
858 * Send the response. If this fails, the originator should
859 * repeat the sequence.
860 */
861 fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
862 fh = fc_frame_header_get(fp);
863 hton24(fh->fh_s_id, local_fid);
864 hton24(fh->fh_d_id, remote_fid);
865 lport->tt.frame_send(lport, fp);
866
867 } else {
868 fc_lport_error(lport, fp);
869 }
870 fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
871 get_unaligned_be64(&flp->fl_wwnn));
872 out:
873 fc_frame_free(rx_fp);
874 }
875
876 /**
877 * fc_lport_recv_els_req() - The generic lport ELS request handler
878 * @lport: The local port that received the request
879 * @fp: The request frame
880 *
881 * This function will see if the lport handles the request or
882 * if an rport should handle the request.
883 *
884 * Locking Note: This function should not be called with the lport
885 * lock held because it will grab the lock.
886 */
887 static void fc_lport_recv_els_req(struct fc_lport *lport,
888 struct fc_frame *fp)
889 {
890 void (*recv)(struct fc_lport *, struct fc_frame *);
891
892 mutex_lock(&lport->lp_mutex);
893
894 /*
895 * Handle special ELS cases like FLOGI, LOGO, and
896 * RSCN here. These don't require a session.
897 * Even if we had a session, it might not be ready.
898 */
899 if (!lport->link_up)
900 fc_frame_free(fp);
901 else {
902 /*
903 * Check opcode.
904 */
905 recv = lport->tt.rport_recv_req;
906 switch (fc_frame_payload_op(fp)) {
907 case ELS_FLOGI:
908 if (!lport->point_to_multipoint)
909 recv = fc_lport_recv_flogi_req;
910 break;
911 case ELS_LOGO:
912 if (fc_frame_sid(fp) == FC_FID_FLOGI)
913 recv = fc_lport_recv_logo_req;
914 break;
915 case ELS_RSCN:
916 recv = lport->tt.disc_recv_req;
917 break;
918 case ELS_ECHO:
919 recv = fc_lport_recv_echo_req;
920 break;
921 case ELS_RLIR:
922 recv = fc_lport_recv_rlir_req;
923 break;
924 case ELS_RNID:
925 recv = fc_lport_recv_rnid_req;
926 break;
927 }
928
929 recv(lport, fp);
930 }
931 mutex_unlock(&lport->lp_mutex);
932 }
933
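/**
 * fc_lport_els_prli() - PRLI handler for the lport's ELS provider
 * @rdata: The remote port that sent the PRLI
 * @spp_len: The length of the service parameter page
 * @spp_in: The incoming service parameter page
 * @spp_out: The outgoing service parameter page
 *
 * Always returns FC_SPP_RESP_INVL, so PRLI pages directed at the lport
 * itself are rejected as invalid.
 */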
934 static int fc_lport_els_prli(struct fc_rport_priv *rdata, u32 spp_len,
935 const struct fc_els_spp *spp_in,
936 struct fc_els_spp *spp_out)
937 {
938 return FC_SPP_RESP_INVL;
939 }
940
941 struct fc4_prov fc_lport_els_prov = {
942 .prli = fc_lport_els_prli,
943 .recv = fc_lport_recv_els_req,
944 };
945
946 /**
947 * fc_lport_recv_req() - The generic lport request handler
948 * @lport: The lport that received the request
949 * @fp: The frame the request is in
950 *
951 * Locking Note: This function should not be called with the lport
952 * lock held because it may grab the lock.
953 */
954 static void fc_lport_recv_req(struct fc_lport *lport,
955 struct fc_frame *fp)
956 {
957 struct fc_frame_header *fh = fc_frame_header_get(fp);
958 struct fc_seq *sp = fr_seq(fp);
959 struct fc4_prov *prov;
960
961 /*
962 * Use RCU read lock and module_lock to be sure module doesn't
963 * deregister and get unloaded while we're calling it.
964 * try_module_get() is inlined and accepts a NULL parameter.
965 * Only ELSes and FCP target ops should come through here.
966 * The locking is unfortunate, and a better scheme is being sought.
967 */
968
969 rcu_read_lock();
970 if (fh->fh_type >= FC_FC4_PROV_SIZE)
971 goto drop;
972 prov = rcu_dereference(fc_passive_prov[fh->fh_type]);
973 if (!prov || !try_module_get(prov->module))
974 goto drop;
975 rcu_read_unlock();
976 prov->recv(lport, fp);
977 module_put(prov->module);
978 return;
979 drop:
980 rcu_read_unlock();
981 FC_LPORT_DBG(lport, "dropping unexpected frame type %x\n", fh->fh_type);
982 fc_frame_free(fp);
983 if (sp)
984 lport->tt.exch_done(sp);
985 }
986
987 /**
988 * fc_lport_reset() - Reset a local port
989 * @lport: The local port which should be reset
990 *
991 * Locking Note: This function should not be called with the
992 * lport lock held.
993 */
994 int fc_lport_reset(struct fc_lport *lport)
995 {
996 cancel_delayed_work_sync(&lport->retry_work);
997 mutex_lock(&lport->lp_mutex);
998 fc_lport_enter_reset(lport);
999 mutex_unlock(&lport->lp_mutex);
1000 return 0;
1001 }
1002 EXPORT_SYMBOL(fc_lport_reset);
1003
1004 /**
1005 * fc_lport_reset_locked() - Reset the local port w/ the lport lock held
1006 * @lport: The local port to be reset
1007 *
1008 * Locking Note: The lport lock is expected to be held before calling
1009 * this routine.
1010 */
1011 static void fc_lport_reset_locked(struct fc_lport *lport)
1012 {
1013 if (lport->dns_rdata)
1014 lport->tt.rport_logoff(lport->dns_rdata);
1015
1016 if (lport->ptp_rdata) {
1017 lport->tt.rport_logoff(lport->ptp_rdata);
1018 kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
1019 lport->ptp_rdata = NULL;
1020 }
1021
1022 lport->tt.disc_stop(lport);
1023
1024 lport->tt.exch_mgr_reset(lport, 0, 0);
1025 fc_host_fabric_name(lport->host) = 0;
1026
1027 if (lport->port_id && (!lport->point_to_multipoint || !lport->link_up))
1028 fc_lport_set_port_id(lport, 0, NULL);
1029 }
1030
1031 /**
1032 * fc_lport_enter_reset() - Reset the local port
1033 * @lport: The local port to be reset
1034 *
1035 * Locking Note: The lport lock is expected to be held before calling
1036 * this routine.
1037 */
1038 static void fc_lport_enter_reset(struct fc_lport *lport)
1039 {
1040 FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
1041 fc_lport_state(lport));
1042
1043 if (lport->state == LPORT_ST_DISABLED || lport->state == LPORT_ST_LOGO)
1044 return;
1045
1046 if (lport->vport) {
1047 if (lport->link_up)
1048 fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING);
1049 else
1050 fc_vport_set_state(lport->vport, FC_VPORT_LINKDOWN);
1051 }
1052 fc_lport_state_enter(lport, LPORT_ST_RESET);
1053 fc_host_post_event(lport->host, fc_get_event_number(),
1054 FCH_EVT_LIPRESET, 0);
1055 fc_vports_linkchange(lport);
1056 fc_lport_reset_locked(lport);
1057 if (lport->link_up)
1058 fc_lport_enter_flogi(lport);
1059 }
1060
1061 /**
1062 * fc_lport_enter_disabled() - Disable the local port
1063 * @lport: The local port to be disabled
1064 *
1065 * Locking Note: The lport lock is expected to be held before calling
1066 * this routine.
1067 */
1068 static void fc_lport_enter_disabled(struct fc_lport *lport)
1069 {
1070 FC_LPORT_DBG(lport, "Entered disabled state from %s state\n",
1071 fc_lport_state(lport));
1072
1073 fc_lport_state_enter(lport, LPORT_ST_DISABLED);
1074 fc_vports_linkchange(lport);
1075 fc_lport_reset_locked(lport);
1076 }
1077
1078 /**
1079 * fc_lport_error() - Handler for any errors
1080 * @lport: The local port that the error was on
1081 * @fp: The error code encoded in a frame pointer
1082 *
1083 * If the error was caused by a resource allocation failure
1084 * then wait for half a second and retry, otherwise retry
1085 * after the e_d_tov time.
1086 */
1087 static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
1088 {
1089 unsigned long delay = 0;
1090 FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n",
1091 PTR_ERR(fp), fc_lport_state(lport),
1092 lport->retry_count);
1093
1094 if (PTR_ERR(fp) == -FC_EX_CLOSED)
1095 return;
1096
1097 /*
1098 * Memory allocation failure, or the exchange timed out
1099 * or we received LS_RJT.
1100 * Retry after delay
1101 */
1102 if (lport->retry_count < lport->max_retry_count) {
1103 lport->retry_count++;
1104 if (!fp)
1105 delay = msecs_to_jiffies(500);
1106 else
1107 delay = msecs_to_jiffies(lport->e_d_tov);
1108
1109 schedule_delayed_work(&lport->retry_work, delay);
1110 } else
1111 fc_lport_enter_reset(lport);
1112 }
1113
1114 /**
1115 * fc_lport_ns_resp() - Handle response to a name server
1116 * registration exchange
1117 * @sp: current sequence in exchange
1118 * @fp: response frame
1119 * @lp_arg: Fibre Channel host port instance
1120 *
1121 * Locking Note: This function will be called without the lport lock
1122 * held, but it will lock, call an _enter_* function or fc_lport_error()
1123 * and then unlock the lport.
1124 */
1125 static void fc_lport_ns_resp(struct fc_seq *sp, struct fc_frame *fp,
1126 void *lp_arg)
1127 {
1128 struct fc_lport *lport = lp_arg;
1129 struct fc_frame_header *fh;
1130 struct fc_ct_hdr *ct;
1131
1132 FC_LPORT_DBG(lport, "Received a ns %s\n", fc_els_resp_type(fp));
1133
1134 if (fp == ERR_PTR(-FC_EX_CLOSED))
1135 return;
1136
1137 mutex_lock(&lport->lp_mutex);
1138
1139 if (lport->state < LPORT_ST_RNN_ID || lport->state > LPORT_ST_RFF_ID) {
1140 FC_LPORT_DBG(lport, "Received a name server response, "
1141 "but in state %s\n", fc_lport_state(lport));
1142 if (IS_ERR(fp))
1143 goto err;
1144 goto out;
1145 }
1146
1147 if (IS_ERR(fp)) {
1148 fc_lport_error(lport, fp);
1149 goto err;
1150 }
1151
1152 fh = fc_frame_header_get(fp);
1153 ct = fc_frame_payload_get(fp, sizeof(*ct));
1154
1155 if (fh && ct && fh->fh_type == FC_TYPE_CT &&
1156 ct->ct_fs_type == FC_FST_DIR &&
1157 ct->ct_fs_subtype == FC_NS_SUBTYPE &&
1158 ntohs(ct->ct_cmd) == FC_FS_ACC)
1159 switch (lport->state) {
1160 case LPORT_ST_RNN_ID:
1161 fc_lport_enter_ns(lport, LPORT_ST_RSNN_NN);
1162 break;
1163 case LPORT_ST_RSNN_NN:
1164 fc_lport_enter_ns(lport, LPORT_ST_RSPN_ID);
1165 break;
1166 case LPORT_ST_RSPN_ID:
1167 fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
1168 break;
1169 case LPORT_ST_RFT_ID:
1170 fc_lport_enter_ns(lport, LPORT_ST_RFF_ID);
1171 break;
1172 case LPORT_ST_RFF_ID:
1173 if (lport->fdmi_enabled)
1174 fc_lport_enter_fdmi(lport);
1175 else
1176 fc_lport_enter_scr(lport);
1177 break;
1178 default:
1179 /* should have already been caught by state checks */
1180 break;
1181 }
1182 else
1183 fc_lport_error(lport, fp);
1184 out:
1185 fc_frame_free(fp);
1186 err:
1187 mutex_unlock(&lport->lp_mutex);
1188 }
1189
1190 /**
1191 * fc_lport_ms_resp() - Handle response to a management server
1192 * exchange
1193 * @sp: current sequence in exchange
1194 * @fp: response frame
1195 * @lp_arg: Fibre Channel host port instance
1196 *
1197 * Locking Note: This function will be called without the lport lock
1198 * held, but it will lock, call an _enter_* function or fc_lport_error()
1199 * and then unlock the lport.
1200 */
1201 static void fc_lport_ms_resp(struct fc_seq *sp, struct fc_frame *fp,
1202 void *lp_arg)
1203 {
1204 struct fc_lport *lport = lp_arg;
1205 struct fc_frame_header *fh;
1206 struct fc_ct_hdr *ct;
1207
1208 FC_LPORT_DBG(lport, "Received a ms %s\n", fc_els_resp_type(fp));
1209
1210 if (fp == ERR_PTR(-FC_EX_CLOSED))
1211 return;
1212
1213 mutex_lock(&lport->lp_mutex);
1214
1215 if (lport->state < LPORT_ST_RHBA || lport->state > LPORT_ST_DPRT) {
1216 FC_LPORT_DBG(lport, "Received a management server response, "
1217 "but in state %s\n", fc_lport_state(lport));
1218 if (IS_ERR(fp))
1219 goto err;
1220 goto out;
1221 }
1222
1223 if (IS_ERR(fp)) {
1224 fc_lport_error(lport, fp);
1225 goto err;
1226 }
1227
1228 fh = fc_frame_header_get(fp);
1229 ct = fc_frame_payload_get(fp, sizeof(*ct));
1230
1231 if (fh && ct && fh->fh_type == FC_TYPE_CT &&
1232 ct->ct_fs_type == FC_FST_MGMT &&
1233 ct->ct_fs_subtype == FC_FDMI_SUBTYPE) {
1234 FC_LPORT_DBG(lport, "Received a management server response, "
1235 "reason=%d explain=%d\n",
1236 ct->ct_reason,
1237 ct->ct_explan);
1238
1239 switch (lport->state) {
1240 case LPORT_ST_RHBA:
1241 if (ntohs(ct->ct_cmd) == FC_FS_ACC)
1242 fc_lport_enter_ms(lport, LPORT_ST_RPA);
1243 else /* Error Skip RPA */
1244 fc_lport_enter_scr(lport);
1245 break;
1246 case LPORT_ST_RPA:
1247 fc_lport_enter_scr(lport);
1248 break;
1249 case LPORT_ST_DPRT:
1250 fc_lport_enter_ms(lport, LPORT_ST_RHBA);
1251 break;
1252 case LPORT_ST_DHBA:
1253 fc_lport_enter_ms(lport, LPORT_ST_DPRT);
1254 break;
1255 default:
1256 /* should have already been caught by state checks */
1257 break;
1258 }
1259 } else {
1260 /* Invalid Frame? */
1261 fc_lport_error(lport, fp);
1262 }
1263 out:
1264 fc_frame_free(fp);
1265 err:
1266 mutex_unlock(&lport->lp_mutex);
1267 }
1268
1269 /**
1270 * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request
1271 * @sp: current sequence in SCR exchange
1272 * @fp: response frame
1273 * @lp_arg: Fibre Channel lport instance that sent the registration request
1274 *
1275 * Locking Note: This function will be called without the lport lock
1276 * held, but it will lock, call an _enter_* function or fc_lport_error
1277 * and then unlock the lport.
1278 */
1279 static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
1280 void *lp_arg)
1281 {
1282 struct fc_lport *lport = lp_arg;
1283 u8 op;
1284
1285 FC_LPORT_DBG(lport, "Received a SCR %s\n", fc_els_resp_type(fp));
1286
1287 if (fp == ERR_PTR(-FC_EX_CLOSED))
1288 return;
1289
1290 mutex_lock(&lport->lp_mutex);
1291
1292 if (lport->state != LPORT_ST_SCR) {
1293 FC_LPORT_DBG(lport, "Received a SCR response, but in state "
1294 "%s\n", fc_lport_state(lport));
1295 if (IS_ERR(fp))
1296 goto err;
1297 goto out;
1298 }
1299
1300 if (IS_ERR(fp)) {
1301 fc_lport_error(lport, fp);
1302 goto err;
1303 }
1304
1305 op = fc_frame_payload_op(fp);
1306 if (op == ELS_LS_ACC)
1307 fc_lport_enter_ready(lport);
1308 else
1309 fc_lport_error(lport, fp);
1310
1311 out:
1312 fc_frame_free(fp);
1313 err:
1314 mutex_unlock(&lport->lp_mutex);
1315 }
1316
1317 /**
1318 * fc_lport_enter_scr() - Send a SCR (State Change Register) request
1319 * @lport: The local port to register for state changes
1320 *
1321 * Locking Note: The lport lock is expected to be held before calling
1322 * this routine.
1323 */
1324 static void fc_lport_enter_scr(struct fc_lport *lport)
1325 {
1326 struct fc_frame *fp;
1327
1328 FC_LPORT_DBG(lport, "Entered SCR state from %s state\n",
1329 fc_lport_state(lport));
1330
1331 fc_lport_state_enter(lport, LPORT_ST_SCR);
1332
1333 fp = fc_frame_alloc(lport, sizeof(struct fc_els_scr));
1334 if (!fp) {
1335 fc_lport_error(lport, fp);
1336 return;
1337 }
1338
1339 if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR,
1340 fc_lport_scr_resp, lport,
1341 2 * lport->r_a_tov))
1342 fc_lport_error(lport, NULL);
1343 }
1344
1345 /**
1346 * fc_lport_enter_ns() - register some object with the name server
1347 * @lport: Fibre Channel local port to register
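 * @state: The name server registration state to enter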
1348 *
1349 * Locking Note: The lport lock is expected to be held before calling
1350 * this routine.
1351 */
1352 static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state)
1353 {
1354 struct fc_frame *fp;
1355 enum fc_ns_req cmd;
1356 int size = sizeof(struct fc_ct_hdr);
1357 size_t len;
1358
1359 FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
1360 fc_lport_state_names[state],
1361 fc_lport_state(lport));
1362
1363 fc_lport_state_enter(lport, state);
1364
1365 switch (state) {
1366 case LPORT_ST_RNN_ID:
1367 cmd = FC_NS_RNN_ID;
1368 size += sizeof(struct fc_ns_rn_id);
1369 break;
1370 case LPORT_ST_RSNN_NN:
1371 len = strnlen(fc_host_symbolic_name(lport->host), 255);
1372 /* if there is no symbolic name, skip to RFT_ID */
1373 if (!len)
1374 return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
1375 cmd = FC_NS_RSNN_NN;
1376 size += sizeof(struct fc_ns_rsnn) + len;
1377 break;
1378 case LPORT_ST_RSPN_ID:
1379 len = strnlen(fc_host_symbolic_name(lport->host), 255);
1380 /* if there is no symbolic name, skip to RFT_ID */
1381 if (!len)
1382 return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
1383 cmd = FC_NS_RSPN_ID;
1384 size += sizeof(struct fc_ns_rspn) + len;
1385 break;
1386 case LPORT_ST_RFT_ID:
1387 cmd = FC_NS_RFT_ID;
1388 size += sizeof(struct fc_ns_rft);
1389 break;
1390 case LPORT_ST_RFF_ID:
1391 cmd = FC_NS_RFF_ID;
1392 size += sizeof(struct fc_ns_rff_id);
1393 break;
1394 default:
1395 fc_lport_error(lport, NULL);
1396 return;
1397 }
1398
1399 fp = fc_frame_alloc(lport, size);
1400 if (!fp) {
1401 fc_lport_error(lport, fp);
1402 return;
1403 }
1404
1405 if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, cmd,
1406 fc_lport_ns_resp,
1407 lport, 3 * lport->r_a_tov))
1408 fc_lport_error(lport, fp);
1409 }
1410
1411 static struct fc_rport_operations fc_lport_rport_ops = {
1412 .event_callback = fc_lport_rport_callback,
1413 };
1414
1415 /**
1416 * fc_lport_enter_dns() - Create a fc_rport for the name server
1417 * @lport: The local port requesting a remote port for the name server
1418 *
1419 * Locking Note: The lport lock is expected to be held before calling
1420 * this routine.
1421 */
1422 static void fc_lport_enter_dns(struct fc_lport *lport)
1423 {
1424 struct fc_rport_priv *rdata;
1425
1426 FC_LPORT_DBG(lport, "Entered DNS state from %s state\n",
1427 fc_lport_state(lport));
1428
1429 fc_lport_state_enter(lport, LPORT_ST_DNS);
1430
1431 mutex_lock(&lport->disc.disc_mutex);
1432 rdata = lport->tt.rport_create(lport, FC_FID_DIR_SERV);
1433 mutex_unlock(&lport->disc.disc_mutex);
1434 if (!rdata)
1435 goto err;
1436
1437 rdata->ops = &fc_lport_rport_ops;
1438 lport->tt.rport_login(rdata);
1439 return;
1440
1441 err:
1442 fc_lport_error(lport, NULL);
1443 }
1444
1445 /**
1446 * fc_lport_enter_ms() - management server commands
1447 * @lport: Fibre Channel local port to register
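 * @state: The management server (FDMI) request state to enter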
1448 *
1449 * Locking Note: The lport lock is expected to be held before calling
1450 * this routine.
1451 */
1452 static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
1453 {
1454 struct fc_frame *fp;
1455 enum fc_fdmi_req cmd;
1456 int size = sizeof(struct fc_ct_hdr);
1457 size_t len;
1458 int numattrs;
1459
1460 FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
1461 fc_lport_state_names[state],
1462 fc_lport_state(lport));
1463
1464 fc_lport_state_enter(lport, state);
1465
1466 switch (state) {
1467 case LPORT_ST_RHBA:
1468 cmd = FC_FDMI_RHBA;
1469 /* Number of HBA Attributes */
1470 numattrs = 10;
1471 len = sizeof(struct fc_fdmi_rhba);
1472 len -= sizeof(struct fc_fdmi_attr_entry);
1473 len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
1474 len += FC_FDMI_HBA_ATTR_NODENAME_LEN;
1475 len += FC_FDMI_HBA_ATTR_MANUFACTURER_LEN;
1476 len += FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN;
1477 len += FC_FDMI_HBA_ATTR_MODEL_LEN;
1478 len += FC_FDMI_HBA_ATTR_MODELDESCR_LEN;
1479 len += FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN;
1480 len += FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN;
1481 len += FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN;
1482 len += FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN;
1483 len += FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN;
1484
1485 size += len;
1486 break;
1487 case LPORT_ST_RPA:
1488 cmd = FC_FDMI_RPA;
1489 /* Number of Port Attributes */
1490 numattrs = 6;
1491 len = sizeof(struct fc_fdmi_rpa);
1492 len -= sizeof(struct fc_fdmi_attr_entry);
1493 len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
1494 len += FC_FDMI_PORT_ATTR_FC4TYPES_LEN;
1495 len += FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN;
1496 len += FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN;
1497 len += FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN;
1498 len += FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN;
1499 len += FC_FDMI_PORT_ATTR_HOSTNAME_LEN;
1500
1501 size += len;
1502 break;
1503 case LPORT_ST_DPRT:
1504 cmd = FC_FDMI_DPRT;
1505 len = sizeof(struct fc_fdmi_dprt);
1506 size += len;
1507 break;
1508 case LPORT_ST_DHBA:
1509 cmd = FC_FDMI_DHBA;
1510 len = sizeof(struct fc_fdmi_dhba);
1511 size += len;
1512 break;
1513 default:
1514 fc_lport_error(lport, NULL);
1515 return;
1516 }
1517
1518 FC_LPORT_DBG(lport, "Cmd=0x%x Len %d size %d\n",
1519 cmd, (int)len, size);
1520 fp = fc_frame_alloc(lport, size);
1521 if (!fp) {
1522 fc_lport_error(lport, fp);
1523 return;
1524 }
1525
1526 if (!lport->tt.elsct_send(lport, FC_FID_MGMT_SERV, fp, cmd,
1527 fc_lport_ms_resp,
1528 lport, 3 * lport->r_a_tov))
1529 fc_lport_error(lport, fp);
1530 }
1531
1532 /**
1533 * fc_lport_enter_fdmi() - Create a fc_rport for the management server
1534 * @lport: The local port requesting a remote port for the management server
1535 *
1536 * Locking Note: The lport lock is expected to be held before calling
1537 * this routine.
1538 */
1539 static void fc_lport_enter_fdmi(struct fc_lport *lport)
1540 {
1541 struct fc_rport_priv *rdata;
1542
1543 FC_LPORT_DBG(lport, "Entered FDMI state from %s state\n",
1544 fc_lport_state(lport));
1545
1546 fc_lport_state_enter(lport, LPORT_ST_FDMI);
1547
1548 mutex_lock(&lport->disc.disc_mutex);
1549 rdata = lport->tt.rport_create(lport, FC_FID_MGMT_SERV);
1550 mutex_unlock(&lport->disc.disc_mutex);
1551 if (!rdata)
1552 goto err;
1553
1554 rdata->ops = &fc_lport_rport_ops;
1555 lport->tt.rport_login(rdata);
1556 return;
1557
1558 err:
1559 fc_lport_error(lport, NULL);
1560 }
1561
1562 /**
1563 * fc_lport_timeout() - Handler for the retry_work timer
1564 * @work: The work struct of the local port
1565 */
1566 static void fc_lport_timeout(struct work_struct *work)
1567 {
1568 struct fc_lport *lport =
1569 container_of(work, struct fc_lport,
1570 retry_work.work);
1571
1572 mutex_lock(&lport->lp_mutex);
1573
1574 switch (lport->state) {
1575 case LPORT_ST_DISABLED:
1576 break;
1577 case LPORT_ST_READY:
1578 break;
1579 case LPORT_ST_RESET:
1580 break;
1581 case LPORT_ST_FLOGI:
1582 fc_lport_enter_flogi(lport);
1583 break;
1584 case LPORT_ST_DNS:
1585 fc_lport_enter_dns(lport);
1586 break;
1587 case LPORT_ST_RNN_ID:
1588 case LPORT_ST_RSNN_NN:
1589 case LPORT_ST_RSPN_ID:
1590 case LPORT_ST_RFT_ID:
1591 case LPORT_ST_RFF_ID:
1592 fc_lport_enter_ns(lport, lport->state);
1593 break;
1594 case LPORT_ST_FDMI:
1595 fc_lport_enter_fdmi(lport);
1596 break;
1597 case LPORT_ST_RHBA:
1598 case LPORT_ST_RPA:
1599 case LPORT_ST_DHBA:
1600 case LPORT_ST_DPRT:
1601 FC_LPORT_DBG(lport, "Skipping lport state %s to SCR\n",
1602 fc_lport_state(lport));
1603 /* fall thru */
1604 case LPORT_ST_SCR:
1605 fc_lport_enter_scr(lport);
1606 break;
1607 case LPORT_ST_LOGO:
1608 fc_lport_enter_logo(lport);
1609 break;
1610 }
1611
1612 mutex_unlock(&lport->lp_mutex);
1613 }
1614
1615 /**
1616 * fc_lport_logo_resp() - Handle response to LOGO request
1617 * @sp: The sequence that the LOGO was on
1618 * @fp: The LOGO frame
1619 * @lp_arg: The lport that received the LOGO response
1620 *
1621 * Locking Note: This function will be called without the lport lock
1622 * held, but it will lock, call an _enter_* function or fc_lport_error()
1623 * and then unlock the lport.
1624 */
1625 void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
1626 void *lp_arg)
1627 {
1628 struct fc_lport *lport = lp_arg;
1629 u8 op;
1630
1631 FC_LPORT_DBG(lport, "Received a LOGO %s\n", fc_els_resp_type(fp));
1632
1633 if (fp == ERR_PTR(-FC_EX_CLOSED))
1634 return;
1635
1636 mutex_lock(&lport->lp_mutex);
1637
1638 if (lport->state != LPORT_ST_LOGO) {
1639 FC_LPORT_DBG(lport, "Received a LOGO response, but in state "
1640 "%s\n", fc_lport_state(lport));
1641 if (IS_ERR(fp))
1642 goto err;
1643 goto out;
1644 }
1645
1646 if (IS_ERR(fp)) {
1647 fc_lport_error(lport, fp);
1648 goto err;
1649 }
1650
1651 op = fc_frame_payload_op(fp);
1652 if (op == ELS_LS_ACC)
1653 fc_lport_enter_disabled(lport);
1654 else
1655 fc_lport_error(lport, fp);
1656
1657 out:
1658 fc_frame_free(fp);
1659 err:
1660 mutex_unlock(&lport->lp_mutex);
1661 }
1662 EXPORT_SYMBOL(fc_lport_logo_resp);
1663
1664 /**
1665 * fc_lport_enter_logo() - Logout of the fabric
1666 * @lport: The local port to be logged out
1667 *
1668 * Locking Note: The lport lock is expected to be held before calling
1669 * this routine.
1670 */
1671 static void fc_lport_enter_logo(struct fc_lport *lport)
1672 {
1673 struct fc_frame *fp;
1674 struct fc_els_logo *logo;
1675
1676 FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n",
1677 fc_lport_state(lport));
1678
1679 fc_lport_state_enter(lport, LPORT_ST_LOGO);
1680 fc_vports_linkchange(lport);
1681
1682 fp = fc_frame_alloc(lport, sizeof(*logo));
1683 if (!fp) {
1684 fc_lport_error(lport, fp);
1685 return;
1686 }
1687
1688 if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO,
1689 fc_lport_logo_resp, lport,
1690 2 * lport->r_a_tov))
1691 fc_lport_error(lport, NULL);
1692 }
1693
1694 /**
1695 * fc_lport_flogi_resp() - Handle response to FLOGI request
1696 * @sp: The sequence that the FLOGI was on
1697 * @fp: The FLOGI response frame
1698 * @lp_arg: The lport that received the FLOGI response
1699 *
1700 * Locking Note: This function will be called without the lport lock
1701 * held, but it will lock, call an _enter_* function or fc_lport_error()
1702 * and then unlock the lport.
1703 */
1704 void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1705 void *lp_arg)
1706 {
1707 struct fc_lport *lport = lp_arg;
1708 struct fc_frame_header *fh;
1709 struct fc_els_flogi *flp;
1710 u32 did;
1711 u16 csp_flags;
1712 unsigned int r_a_tov;
1713 unsigned int e_d_tov;
1714 u16 mfs;
1715
1716 FC_LPORT_DBG(lport, "Received a FLOGI %s\n", fc_els_resp_type(fp));
1717
1718 if (fp == ERR_PTR(-FC_EX_CLOSED))
1719 return;
1720
1721 mutex_lock(&lport->lp_mutex);
1722
1723 if (lport->state != LPORT_ST_FLOGI) {
1724 FC_LPORT_DBG(lport, "Received a FLOGI response, but in state "
1725 "%s\n", fc_lport_state(lport));
1726 if (IS_ERR(fp))
1727 goto err;
1728 goto out;
1729 }
1730
1731 if (IS_ERR(fp)) {
1732 fc_lport_error(lport, fp);
1733 goto err;
1734 }
1735
1736 fh = fc_frame_header_get(fp);
1737 did = fc_frame_did(fp);
1738 if (fh->fh_r_ctl != FC_RCTL_ELS_REP || did == 0 ||
1739 fc_frame_payload_op(fp) != ELS_LS_ACC) {
1740 FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n");
1741 fc_lport_error(lport, fp);
1742 goto err;
1743 }
1744
1745 flp = fc_frame_payload_get(fp, sizeof(*flp));
1746 if (!flp) {
1747 FC_LPORT_DBG(lport, "FLOGI bad response\n");
1748 fc_lport_error(lport, fp);
1749 goto err;
1750 }
1751
1752 mfs = ntohs(flp->fl_csp.sp_bb_data) &
1753 FC_SP_BB_DATA_MASK;
1754
1755 if (mfs < FC_SP_MIN_MAX_PAYLOAD || mfs > FC_SP_MAX_MAX_PAYLOAD) {
1756 FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
1757 "lport->mfs:%hu\n", mfs, lport->mfs);
1758 fc_lport_error(lport, fp);
1759 goto err;
1760 }
1761
1762 if (mfs <= lport->mfs) {
1763 lport->mfs = mfs;
1764 fc_host_maxframe_size(lport->host) = mfs;
1765 }
1766
1767 csp_flags = ntohs(flp->fl_csp.sp_features);
1768 r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
1769 e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
1770 if (csp_flags & FC_SP_FT_EDTR)
1771 e_d_tov /= 1000000;
1772
1773 lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC);
1774
1775 if ((csp_flags & FC_SP_FT_FPORT) == 0) {
1776 if (e_d_tov > lport->e_d_tov)
1777 lport->e_d_tov = e_d_tov;
1778 lport->r_a_tov = 2 * e_d_tov;
1779 fc_lport_set_port_id(lport, did, fp);
1780 printk(KERN_INFO "host%d: libfc: "
1781 "Port (%6.6x) entered "
1782 "point-to-point mode\n",
1783 lport->host->host_no, did);
1784 fc_lport_ptp_setup(lport, fc_frame_sid(fp),
1785 get_unaligned_be64(
1786 &flp->fl_wwpn),
1787 get_unaligned_be64(
1788 &flp->fl_wwnn));
1789 } else {
1790 lport->e_d_tov = e_d_tov;
1791 lport->r_a_tov = r_a_tov;
1792 fc_host_fabric_name(lport->host) =
1793 get_unaligned_be64(&flp->fl_wwnn);
1794 fc_lport_set_port_id(lport, did, fp);
1795 fc_lport_enter_dns(lport);
1796 }
1797
1798 out:
1799 fc_frame_free(fp);
1800 err:
1801 mutex_unlock(&lport->lp_mutex);
1802 }
1803 EXPORT_SYMBOL(fc_lport_flogi_resp);
1804
1805 /**
1806 * fc_lport_enter_flogi() - Send a FLOGI request to the fabric manager
1807 * @lport: Fibre Channel local port to be logged in to the fabric
1808 *
1809 * Locking Note: The lport lock is expected to be held before calling
1810 * this routine.
1811 */
1812 static void fc_lport_enter_flogi(struct fc_lport *lport)
1813 {
1814 struct fc_frame *fp;
1815
1816 FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n",
1817 fc_lport_state(lport));
1818
1819 fc_lport_state_enter(lport, LPORT_ST_FLOGI);
1820
1821 if (lport->point_to_multipoint) {
1822 if (lport->port_id)
1823 fc_lport_enter_ready(lport);
1824 return;
1825 }
1826
1827 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
1828 if (!fp)
1829 return fc_lport_error(lport, fp);
1830
1831 if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp,
1832 lport->vport ? ELS_FDISC : ELS_FLOGI,
1833 fc_lport_flogi_resp, lport,
1834 lport->vport ? 2 * lport->r_a_tov :
1835 lport->e_d_tov))
1836 fc_lport_error(lport, NULL);
1837 }
1838
1839 /**
1840 * fc_lport_config() - Configure a fc_lport
1841 * @lport: The local port to be configured
1842 */
1843 int fc_lport_config(struct fc_lport *lport)
1844 {
1845 INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
1846 mutex_init(&lport->lp_mutex);
1847
1848 fc_lport_state_enter(lport, LPORT_ST_DISABLED);
1849
1850 fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
1851 fc_lport_add_fc4_type(lport, FC_TYPE_CT);
1852 fc_fc4_conf_lport_params(lport, FC_TYPE_FCP);
1853
1854 return 0;
1855 }
1856 EXPORT_SYMBOL(fc_lport_config);
1857
1858 /**
1859 * fc_lport_init() - Initialize the lport layer for a local port
1860 * @lport: The local port to initialize the exchange layer for
1861 */
1862 int fc_lport_init(struct fc_lport *lport)
1863 {
1864 if (!lport->tt.lport_recv)
1865 lport->tt.lport_recv = fc_lport_recv_req;
1866
1867 if (!lport->tt.lport_reset)
1868 lport->tt.lport_reset = fc_lport_reset;
1869
1870 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
1871 fc_host_node_name(lport->host) = lport->wwnn;
1872 fc_host_port_name(lport->host) = lport->wwpn;
1873 fc_host_supported_classes(lport->host) = FC_COS_CLASS3;
1874 memset(fc_host_supported_fc4s(lport->host), 0,
1875 sizeof(fc_host_supported_fc4s(lport->host)));
1876 fc_host_supported_fc4s(lport->host)[2] = 1;
1877 fc_host_supported_fc4s(lport->host)[7] = 1;
1878
1879 /* This value is also unchanging */
1880 memset(fc_host_active_fc4s(lport->host), 0,
1881 sizeof(fc_host_active_fc4s(lport->host)));
1882 fc_host_active_fc4s(lport->host)[2] = 1;
1883 fc_host_active_fc4s(lport->host)[7] = 1;
1884 fc_host_maxframe_size(lport->host) = lport->mfs;
1885 fc_host_supported_speeds(lport->host) = 0;
1886 if (lport->link_supported_speeds & FC_PORTSPEED_1GBIT)
1887 fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
1888 if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
1889 fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
1890 fc_fc4_add_lport(lport);
1891
1892 return 0;
1893 }
1894 EXPORT_SYMBOL(fc_lport_init);
1895
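/*
 * Usage sketch (hypothetical, not from this file): how a libfc LLD might
 * prepare an lport and then call fc_lport_config() and fc_lport_init().
 * The wwnn/wwpn arguments, the 10G speed and the bare scsi_host_template
 * are placeholders; only fc_lport_config() and fc_lport_init() are defined
 * in this file, the rest follows common libfc driver practice.
 */
#if 0	/* illustration only */
static struct fc_lport *example_lport_setup(struct scsi_host_template *sht,
					    u64 wwnn, u64 wwpn)
{
	struct Scsi_Host *shost;
	struct fc_lport *lport;

	/* Scsi_Host whose private data holds the fc_lport (libfc helper). */
	shost = libfc_host_alloc(sht, 0);
	if (!shost)
		return NULL;
	lport = shost_priv(shost);

	/* Values read later by fc_lport_init() for the fc_host attributes. */
	lport->wwnn = wwnn;
	lport->wwpn = wwpn;
	lport->link_supported_speeds = FC_PORTSPEED_10GBIT;

	fc_lport_config(lport);		/* lp_mutex, retry_work, DISABLED state */
	fc_lport_init(lport);		/* default tt handlers, fc_host attributes */
	return lport;
}
#endif
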
1896 /**
1897 * fc_lport_bsg_resp() - The common response handler for FC Passthrough requests
1898 * @sp: The sequence for the FC Passthrough response
1899 * @fp: The response frame
1900 * @info_arg: The BSG info that the response is for
1901 */
1902 static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
1903 void *info_arg)
1904 {
1905 struct fc_bsg_info *info = info_arg;
1906 struct fc_bsg_job *job = info->job;
1907 struct fc_lport *lport = info->lport;
1908 struct fc_frame_header *fh;
1909 size_t len;
1910 void *buf;
1911
1912 if (IS_ERR(fp)) {
1913 job->reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ?
1914 -ECONNABORTED : -ETIMEDOUT;
1915 job->reply_len = sizeof(uint32_t);
1916 job->state_flags |= FC_RQST_STATE_DONE;
1917 job->job_done(job);
1918 kfree(info);
1919 return;
1920 }
1921
1922 mutex_lock(&lport->lp_mutex);
1923 fh = fc_frame_header_get(fp);
1924 len = fr_len(fp) - sizeof(*fh);
1925 buf = fc_frame_payload_get(fp, 0);
1926
1927 if (fr_sof(fp) == FC_SOF_I3 && !ntohs(fh->fh_seq_cnt)) {
1928 /* Get the response code from the first frame payload */
1929 unsigned short cmd = (info->rsp_code == FC_FS_ACC) ?
1930 ntohs(((struct fc_ct_hdr *)buf)->ct_cmd) :
1931 (unsigned short)fc_frame_payload_op(fp);
1932
1933 /* Save the reply status of the job */
1934 job->reply->reply_data.ctels_reply.status =
1935 (cmd == info->rsp_code) ?
1936 FC_CTELS_STATUS_OK : FC_CTELS_STATUS_REJECT;
1937 }
1938
1939 job->reply->reply_payload_rcv_len +=
1940 fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents,
1941 &info->offset, NULL);
1942
1943 if (fr_eof(fp) == FC_EOF_T &&
1944 (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
1945 (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
1946 if (job->reply->reply_payload_rcv_len >
1947 job->reply_payload.payload_len)
1948 job->reply->reply_payload_rcv_len =
1949 job->reply_payload.payload_len;
1950 job->reply->result = 0;
1951 job->state_flags |= FC_RQST_STATE_DONE;
1952 job->job_done(job);
1953 kfree(info);
1954 }
1955 fc_frame_free(fp);
1956 mutex_unlock(&lport->lp_mutex);
1957 }
1958
1959 /**
1960 * fc_lport_els_request() - Send ELS passthrough request
1961 * @job: The BSG Passthrough job
1962 * @lport: The local port sending the request
1963 * @did: The destination port id
 * @tov: The timeout period to wait for the response
1964 *
1965 * Locking Note: The lport lock is expected to be held before calling
1966 * this routine.
1967 */
1968 static int fc_lport_els_request(struct fc_bsg_job *job,
1969 struct fc_lport *lport,
1970 u32 did, u32 tov)
1971 {
1972 struct fc_bsg_info *info;
1973 struct fc_frame *fp;
1974 struct fc_frame_header *fh;
1975 char *pp;
1976 int len;
1977
1978 fp = fc_frame_alloc(lport, job->request_payload.payload_len);
1979 if (!fp)
1980 return -ENOMEM;
1981
1982 len = job->request_payload.payload_len;
1983 pp = fc_frame_payload_get(fp, len);
1984
1985 sg_copy_to_buffer(job->request_payload.sg_list,
1986 job->request_payload.sg_cnt,
1987 pp, len);
1988
1989 fh = fc_frame_header_get(fp);
1990 fh->fh_r_ctl = FC_RCTL_ELS_REQ;
1991 hton24(fh->fh_d_id, did);
1992 hton24(fh->fh_s_id, lport->port_id);
1993 fh->fh_type = FC_TYPE_ELS;
1994 hton24(fh->fh_f_ctl, FC_FCTL_REQ);
1995 fh->fh_cs_ctl = 0;
1996 fh->fh_df_ctl = 0;
1997 fh->fh_parm_offset = 0;
1998
1999 info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
2000 if (!info) {
2001 fc_frame_free(fp);
2002 return -ENOMEM;
2003 }
2004
2005 info->job = job;
2006 info->lport = lport;
2007 info->rsp_code = ELS_LS_ACC;
2008 info->nents = job->reply_payload.sg_cnt;
2009 info->sg = job->reply_payload.sg_list;
2010
2011 if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
2012 NULL, info, tov)) {
2013 kfree(info);
2014 return -ECOMM;
2015 }
2016 return 0;
2017 }
2018
2019 /**
2020 * fc_lport_ct_request() - Send CT Passthrough request
2021 * @job: The BSG Passthrough job
2022 * @lport: The local port sending the request
2023 * @did: The destination FC-ID
2024 * @tov: The timeout period to wait for the response
2025 *
2026 * Locking Note: The lport lock is expected to be held before calling
2027 * this routine.
2028 */
2029 static int fc_lport_ct_request(struct fc_bsg_job *job,
2030 struct fc_lport *lport, u32 did, u32 tov)
2031 {
2032 struct fc_bsg_info *info;
2033 struct fc_frame *fp;
2034 struct fc_frame_header *fh;
2035 struct fc_ct_req *ct;
2036 size_t len;
2037
2038 fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
2039 job->request_payload.payload_len);
2040 if (!fp)
2041 return -ENOMEM;
2042
2043 len = job->request_payload.payload_len;
2044 ct = fc_frame_payload_get(fp, len);
2045
2046 sg_copy_to_buffer(job->request_payload.sg_list,
2047 job->request_payload.sg_cnt,
2048 ct, len);
2049
2050 fh = fc_frame_header_get(fp);
2051 fh->fh_r_ctl = FC_RCTL_DD_UNSOL_CTL;
2052 hton24(fh->fh_d_id, did);
2053 hton24(fh->fh_s_id, lport->port_id);
2054 fh->fh_type = FC_TYPE_CT;
2055 hton24(fh->fh_f_ctl, FC_FCTL_REQ);
2056 fh->fh_cs_ctl = 0;
2057 fh->fh_df_ctl = 0;
2058 fh->fh_parm_offset = 0;
2059
2060 info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
2061 if (!info) {
2062 fc_frame_free(fp);
2063 return -ENOMEM;
2064 }
2065
2066 info->job = job;
2067 info->lport = lport;
2068 info->rsp_code = FC_FS_ACC;
2069 info->nents = job->reply_payload.sg_cnt;
2070 info->sg = job->reply_payload.sg_list;
2071
2072 if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
2073 NULL, info, tov)) {
2074 kfree(info);
2075 return -ECOMM;
2076 }
2077 return 0;
2078 }
2079
2080 /**
2081 * fc_lport_bsg_request() - The common entry point for sending
2082 * FC Passthrough requests
2083 * @job: The BSG passthrough job
2084 */
2085 int fc_lport_bsg_request(struct fc_bsg_job *job)
2086 {
2087 struct request *rsp = job->req->next_rq;
2088 struct Scsi_Host *shost = job->shost;
2089 struct fc_lport *lport = shost_priv(shost);
2090 struct fc_rport *rport;
2091 struct fc_rport_priv *rdata;
2092 int rc = -EINVAL;
2093 u32 did;
2094
2095 job->reply->reply_payload_rcv_len = 0;
2096 if (rsp)
2097 rsp->resid_len = job->reply_payload.payload_len;
2098
2099 mutex_lock(&lport->lp_mutex);
2100
2101 switch (job->request->msgcode) {
2102 case FC_BSG_RPT_ELS:
2103 rport = job->rport;
2104 if (!rport)
2105 break;
2106
2107 rdata = rport->dd_data;
2108 rc = fc_lport_els_request(job, lport, rport->port_id,
2109 rdata->e_d_tov);
2110 break;
2111
2112 case FC_BSG_RPT_CT:
2113 rport = job->rport;
2114 if (!rport)
2115 break;
2116
2117 rdata = rport->dd_data;
2118 rc = fc_lport_ct_request(job, lport, rport->port_id,
2119 rdata->e_d_tov);
2120 break;
2121
2122 case FC_BSG_HST_CT:
2123 did = ntoh24(job->request->rqst_data.h_ct.port_id);
2124 if (did == FC_FID_DIR_SERV)
2125 rdata = lport->dns_rdata;
2126 else
2127 rdata = lport->tt.rport_lookup(lport, did);
2128
2129 if (!rdata)
2130 break;
2131
2132 rc = fc_lport_ct_request(job, lport, did, rdata->e_d_tov);
2133 break;
2134
2135 case FC_BSG_HST_ELS_NOLOGIN:
2136 did = ntoh24(job->request->rqst_data.h_els.port_id);
2137 rc = fc_lport_els_request(job, lport, did, lport->e_d_tov);
2138 break;
2139 }
2140
2141 mutex_unlock(&lport->lp_mutex);
2142 return rc;
2143 }
2144 EXPORT_SYMBOL(fc_lport_bsg_request);
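
/*
 * Usage sketch (hypothetical, not from this file): LLDs do not normally call
 * fc_lport_bsg_request() themselves; a driver typically points the
 * .bsg_request hook of its fc_function_template at it so the FC transport
 * class routes BSG passthrough jobs to libfc.  The template name below is a
 * placeholder and the remaining hooks are elided.
 */
#if 0	/* illustration only */
static struct fc_function_template example_fc_transport_template = {
	/* fc_host and rport show/set attributes elided for brevity */
	.bsg_request	= fc_lport_bsg_request,
};
#endif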