[SCSI] libfc: add FLOGI state to rport for VN2VN
1 /*
2 * Copyright(c) 2007 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20 /*
21 * PORT LOCKING NOTES
22 *
23 * These comments only apply to the 'port code' which consists of the lport,
24 * disc and rport blocks.
25 *
26 * MOTIVATION
27 *
28 * The lport, disc and rport blocks all have mutexes that are used to protect
29 * those objects. The main motivation for these locks is to prevent an
30 * lport reset from happening just before we send a frame. In that scenario the
31 * lport's FID would get set to zero and then we'd send a frame with an
32 * invalid SID. We also need to ensure that states don't change unexpectedly
33 * while processing another state.
34 *
35 * HIERARCHY
36 *
37 * The following hierarchy defines the locking rules. A greater lock
38 * may be held before acquiring a lesser lock, but a lesser lock should never
39 * be held while attempting to acquire a greater lock. Here is the hierarchy:
40 *
41 * lport > disc, lport > rport, disc > rport
42 *
43 * CALLBACKS
44 *
45 * The callbacks cause complications with this scheme. There is a callback
46 * from the rport (to either lport or disc) and a callback from disc
47 * (to the lport).
48 *
49 * As rports exit the rport state machine a callback is made to the owner of
50 * the rport to notify success or failure. Since the callback is likely to
51 * cause the lport or disc to grab its lock we cannot hold the rport lock
52 * while making the callback. To ensure that the rport is not free'd while
53 * processing the callback the rport callbacks are serialized through a
54 * single-threaded workqueue. An rport would never be free'd while in a
55 * callback handler because no other rport work in this queue can be executed
56 * at the same time.
57 *
58 * When discovery succeeds or fails a callback is made to the lport as
59 * notification. Currently, successful discovery causes the lport to take no
60 * action. A failure will cause the lport to reset. There is likely a circular
61 * locking problem with this implementation.
62 */
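/*
 * LOCK ORDERING SKETCH
 *
 * A minimal, hypothetical helper (not part of libfc) illustrating the
 * hierarchy above: greater locks are taken first and released last.
 *
 *	static void example_nested_locking(struct fc_lport *lport,
 *					   struct fc_rport_priv *rdata)
 *	{
 *		mutex_lock(&lport->lp_mutex);		// lport (greatest) first
 *		mutex_lock(&lport->disc.disc_mutex);	// then disc
 *		mutex_lock(&rdata->rp_mutex);		// rport (least) last
 *
 *		// ... work that needs all three objects held stable ...
 *
 *		mutex_unlock(&rdata->rp_mutex);
 *		mutex_unlock(&lport->disc.disc_mutex);
 *		mutex_unlock(&lport->lp_mutex);
 *	}
 *
 * Taking lp_mutex or disc_mutex while already holding rp_mutex would
 * invert the hierarchy and risk deadlock; that is why rport callbacks
 * are run from a workqueue with no rport lock held.
 */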
63
64 /*
65 * LPORT LOCKING
66 *
67 * The critical sections protected by the lport's mutex are quite broad and
68 * may be improved upon in the future. The lport code and its locking don't
69 * influence the I/O path, so excessive locking doesn't penalize I/O
70 * performance.
71 *
72 * The strategy is to lock whenever processing a request or response. Note
73 * that every _enter_* function corresponds to a state change. They generally
74 * change the lport's state and then send a request out on the wire. We lock
75 * before calling any of these functions to protect that state change. This
76 * means that the entry points into the lport block manage the locks while
77 * the state machine transitions between states (i.e. _enter_* functions),
78 * always staying protected.
79 *
80 * When handling responses we also hold the lport mutex broadly. When the
81 * lport receives the response frame it locks the mutex and then calls the
82 * appropriate handler for the particular response. Generally a response will
83 * trigger a state change and so the lock must already be held.
84 *
85 * Retries also have to consider the locking. The retries occur from a work
86 * context and the work function will lock the lport and then retry the state
87 * (i.e. _enter_* function).
88 */
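/*
 * ENTRY-POINT LOCKING SKETCH
 *
 * A hypothetical pair of functions (not actual libfc code) showing the
 * shape described above: the externally visible entry point takes
 * lp_mutex, and the _enter_* state-change helper is only ever called
 * with that mutex already held.
 *
 *	static void fc_lport_enter_example(struct fc_lport *lport)
 *	{
 *		// lp_mutex is held by the caller
 *		fc_lport_state_enter(lport, LPORT_ST_FLOGI);
 *		// ... allocate a frame and send the request ...
 *	}
 *
 *	static void fc_lport_example_entry_point(struct fc_lport *lport)
 *	{
 *		mutex_lock(&lport->lp_mutex);
 *		fc_lport_enter_example(lport);
 *		mutex_unlock(&lport->lp_mutex);
 *	}
 *
 * Response handlers and the retry work function follow the same pattern:
 * lock, dispatch to an _enter_* function, unlock.
 */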
89
90 #include <linux/timer.h>
91 #include <linux/slab.h>
92 #include <asm/unaligned.h>
93
94 #include <scsi/fc/fc_gs.h>
95
96 #include <scsi/libfc.h>
97 #include <scsi/fc_encode.h>
98 #include <linux/scatterlist.h>
99
100 #include "fc_libfc.h"
101
102 /* Fabric IDs to use for point-to-point mode, chosen on whims. */
103 #define FC_LOCAL_PTP_FID_LO 0x010101
104 #define FC_LOCAL_PTP_FID_HI 0x010102
105
106 #define DNS_DELAY	      3 /* Discovery delay after RSCN (in seconds) */
107
108 static void fc_lport_error(struct fc_lport *, struct fc_frame *);
109
110 static void fc_lport_enter_reset(struct fc_lport *);
111 static void fc_lport_enter_flogi(struct fc_lport *);
112 static void fc_lport_enter_dns(struct fc_lport *);
113 static void fc_lport_enter_ns(struct fc_lport *, enum fc_lport_state);
114 static void fc_lport_enter_scr(struct fc_lport *);
115 static void fc_lport_enter_ready(struct fc_lport *);
116 static void fc_lport_enter_logo(struct fc_lport *);
117
118 static const char *fc_lport_state_names[] = {
119 [LPORT_ST_DISABLED] = "disabled",
120 [LPORT_ST_FLOGI] = "FLOGI",
121 [LPORT_ST_DNS] = "dNS",
122 [LPORT_ST_RNN_ID] = "RNN_ID",
123 [LPORT_ST_RSNN_NN] = "RSNN_NN",
124 [LPORT_ST_RSPN_ID] = "RSPN_ID",
125 [LPORT_ST_RFT_ID] = "RFT_ID",
126 [LPORT_ST_RFF_ID] = "RFF_ID",
127 [LPORT_ST_SCR] = "SCR",
128 [LPORT_ST_READY] = "Ready",
129 [LPORT_ST_LOGO] = "LOGO",
130 [LPORT_ST_RESET] = "reset",
131 };
132
133 /**
134 * struct fc_bsg_info - FC Passthrough management structure
135 * @job: The passthrough job
136 * @lport: The local port to pass through a command
137 * @rsp_code: The expected response code
138 * @sg: job->reply_payload.sg_list
139 * @nents: job->reply_payload.sg_cnt
140 * @offset: The offset into the response data
141 */
142 struct fc_bsg_info {
143 struct fc_bsg_job *job;
144 struct fc_lport *lport;
145 u16 rsp_code;
146 struct scatterlist *sg;
147 u32 nents;
148 size_t offset;
149 };
150
151 /**
152 * fc_frame_drop() - Dummy frame handler
153 * @lport: The local port the frame was received on
154 * @fp: The received frame
155 */
156 static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
157 {
158 fc_frame_free(fp);
159 return 0;
160 }
161
162 /**
163 * fc_lport_rport_callback() - Event handler for rport events
164 * @lport: The lport which is receiving the event
165 * @rdata: private remote port data
166 * @event: The event that occurred
167 *
168 * Locking Note: The rport lock should not be held when calling
169 * this function.
170 */
171 static void fc_lport_rport_callback(struct fc_lport *lport,
172 struct fc_rport_priv *rdata,
173 enum fc_rport_event event)
174 {
175 FC_LPORT_DBG(lport, "Received a %d event for port (%6.6x)\n", event,
176 rdata->ids.port_id);
177
178 mutex_lock(&lport->lp_mutex);
179 switch (event) {
180 case RPORT_EV_READY:
181 if (lport->state == LPORT_ST_DNS) {
182 lport->dns_rdata = rdata;
183 fc_lport_enter_ns(lport, LPORT_ST_RNN_ID);
184 } else {
185                         FC_LPORT_DBG(lport, "Received a READY event "
186 "on port (%6.6x) for the directory "
187 "server, but the lport is not "
188 "in the DNS state, it's in the "
189 "%d state", rdata->ids.port_id,
190 lport->state);
191 lport->tt.rport_logoff(rdata);
192 }
193 break;
194 case RPORT_EV_LOGO:
195 case RPORT_EV_FAILED:
196 case RPORT_EV_STOP:
197 lport->dns_rdata = NULL;
198 break;
199 case RPORT_EV_NONE:
200 break;
201 }
202 mutex_unlock(&lport->lp_mutex);
203 }
204
205 /**
206 * fc_lport_state() - Return a string which represents the lport's state
207 * @lport: The lport whose state is to be converted to a string
208 */
209 static const char *fc_lport_state(struct fc_lport *lport)
210 {
211 const char *cp;
212
213 cp = fc_lport_state_names[lport->state];
214 if (!cp)
215 cp = "unknown";
216 return cp;
217 }
218
219 /**
220 * fc_lport_ptp_setup() - Create an rport for point-to-point mode
221 * @lport: The lport to attach the ptp rport to
222 * @remote_fid: The FID of the ptp rport
223 * @remote_wwpn: The WWPN of the ptp rport
224 * @remote_wwnn: The WWNN of the ptp rport
225 */
226 static void fc_lport_ptp_setup(struct fc_lport *lport,
227 u32 remote_fid, u64 remote_wwpn,
228 u64 remote_wwnn)
229 {
230 mutex_lock(&lport->disc.disc_mutex);
231 if (lport->ptp_rdata) {
232 lport->tt.rport_logoff(lport->ptp_rdata);
233 kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
234 }
235 lport->ptp_rdata = lport->tt.rport_create(lport, remote_fid);
236 kref_get(&lport->ptp_rdata->kref);
237 lport->ptp_rdata->ids.port_name = remote_wwpn;
238 lport->ptp_rdata->ids.node_name = remote_wwnn;
239 mutex_unlock(&lport->disc.disc_mutex);
240
241 lport->tt.rport_login(lport->ptp_rdata);
242
243 fc_lport_enter_ready(lport);
244 }
245
246 /**
247 * fc_get_host_port_state() - Return the port state of the given Scsi_Host
248 * @shost: The SCSI host whose port state is to be determined
249 */
250 void fc_get_host_port_state(struct Scsi_Host *shost)
251 {
252 struct fc_lport *lport = shost_priv(shost);
253
254 mutex_lock(&lport->lp_mutex);
255 if (!lport->link_up)
256 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
257 else
258 switch (lport->state) {
259 case LPORT_ST_READY:
260 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
261 break;
262 default:
263 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
264 }
265 mutex_unlock(&lport->lp_mutex);
266 }
267 EXPORT_SYMBOL(fc_get_host_port_state);
268
269 /**
270 * fc_get_host_speed() - Return the speed of the given Scsi_Host
271 * @shost: The SCSI host whose port speed is to be determined
272 */
273 void fc_get_host_speed(struct Scsi_Host *shost)
274 {
275 struct fc_lport *lport = shost_priv(shost);
276
277 fc_host_speed(shost) = lport->link_speed;
278 }
279 EXPORT_SYMBOL(fc_get_host_speed);
280
281 /**
282 * fc_get_host_stats() - Return the Scsi_Host's statistics
283 * @shost: The SCSI host whose statistics are to be returned
284 */
285 struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
286 {
287 struct fc_host_statistics *fcoe_stats;
288 struct fc_lport *lport = shost_priv(shost);
289 struct timespec v0, v1;
290 unsigned int cpu;
291
292 fcoe_stats = &lport->host_stats;
293 memset(fcoe_stats, 0, sizeof(struct fc_host_statistics));
294
295 jiffies_to_timespec(jiffies, &v0);
296 jiffies_to_timespec(lport->boot_time, &v1);
297 fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);
298
299 for_each_possible_cpu(cpu) {
300 struct fcoe_dev_stats *stats;
301
302 stats = per_cpu_ptr(lport->dev_stats, cpu);
303
304 fcoe_stats->tx_frames += stats->TxFrames;
305 fcoe_stats->tx_words += stats->TxWords;
306 fcoe_stats->rx_frames += stats->RxFrames;
307 fcoe_stats->rx_words += stats->RxWords;
308 fcoe_stats->error_frames += stats->ErrorFrames;
309 fcoe_stats->invalid_crc_count += stats->InvalidCRCCount;
310 fcoe_stats->fcp_input_requests += stats->InputRequests;
311 fcoe_stats->fcp_output_requests += stats->OutputRequests;
312 fcoe_stats->fcp_control_requests += stats->ControlRequests;
313 fcoe_stats->fcp_input_megabytes += stats->InputMegabytes;
314 fcoe_stats->fcp_output_megabytes += stats->OutputMegabytes;
315 fcoe_stats->link_failure_count += stats->LinkFailureCount;
316 }
317 fcoe_stats->lip_count = -1;
318 fcoe_stats->nos_count = -1;
319 fcoe_stats->loss_of_sync_count = -1;
320 fcoe_stats->loss_of_signal_count = -1;
321 fcoe_stats->prim_seq_protocol_err_count = -1;
322 fcoe_stats->dumped_frames = -1;
323 return fcoe_stats;
324 }
325 EXPORT_SYMBOL(fc_get_host_stats);
326
327 /**
328 * fc_lport_flogi_fill() - Fill in FLOGI command for request
329 * @lport: The local port the FLOGI is for
330 * @flogi: The FLOGI command
331 * @op: The opcode
332 */
333 static void fc_lport_flogi_fill(struct fc_lport *lport,
334 struct fc_els_flogi *flogi,
335 unsigned int op)
336 {
337 struct fc_els_csp *sp;
338 struct fc_els_cssp *cp;
339
340 memset(flogi, 0, sizeof(*flogi));
341 flogi->fl_cmd = (u8) op;
342 put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
343 put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
344 sp = &flogi->fl_csp;
345 sp->sp_hi_ver = 0x20;
346 sp->sp_lo_ver = 0x20;
347 sp->sp_bb_cred = htons(10); /* this gets set by gateway */
348 sp->sp_bb_data = htons((u16) lport->mfs);
349 cp = &flogi->fl_cssp[3 - 1]; /* class 3 parameters */
350 cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
351 if (op != ELS_FLOGI) {
352 sp->sp_features = htons(FC_SP_FT_CIRO);
353 sp->sp_tot_seq = htons(255); /* seq. we accept */
354 sp->sp_rel_off = htons(0x1f);
355 sp->sp_e_d_tov = htonl(lport->e_d_tov);
356
357 cp->cp_rdfs = htons((u16) lport->mfs);
358 cp->cp_con_seq = htons(255);
359 cp->cp_open_seq = 1;
360 }
361 }
362
363 /**
364 * fc_lport_add_fc4_type() - Add a supported FC-4 type to a local port
365 * @lport: The local port to add a new FC-4 type to
366 * @type: The new FC-4 type
367 */
368 static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
369 {
370 __be32 *mp;
371
372 mp = &lport->fcts.ff_type_map[type / FC_NS_BPW];
373 *mp = htonl(ntohl(*mp) | 1UL << (type % FC_NS_BPW));
374 }
375
376 /**
377 * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report.
378 * @sp: The sequence in the RLIR exchange
379 * @fp: The RLIR request frame
380 * @lport: Fibre Channel local port receiving the RLIR
381 *
382 * Locking Note: The lport lock is expected to be held before calling
383 * this function.
384 */
385 static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp,
386 struct fc_lport *lport)
387 {
388 FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
389 fc_lport_state(lport));
390
391 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
392 fc_frame_free(fp);
393 }
394
395 /**
396 * fc_lport_recv_echo_req() - Handle received ECHO request
397 * @sp: The sequence in the ECHO exchange
398 * @in_fp: The ECHO request frame
399 * @lport: The local port receiving the ECHO
400 *
401 * Locking Note: The lport lock is expected to be held before calling
402 * this function.
403 */
404 static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
405 struct fc_lport *lport)
406 {
407 struct fc_frame *fp;
408 struct fc_exch *ep = fc_seq_exch(sp);
409 unsigned int len;
410 void *pp;
411 void *dp;
412 u32 f_ctl;
413
414 FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
415 fc_lport_state(lport));
416
417 len = fr_len(in_fp) - sizeof(struct fc_frame_header);
418 pp = fc_frame_payload_get(in_fp, len);
419
420 if (len < sizeof(__be32))
421 len = sizeof(__be32);
422
423 fp = fc_frame_alloc(lport, len);
424 if (fp) {
425 dp = fc_frame_payload_get(fp, len);
426 memcpy(dp, pp, len);
427 *((__be32 *)dp) = htonl(ELS_LS_ACC << 24);
428 sp = lport->tt.seq_start_next(sp);
429 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
430 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
431 FC_TYPE_ELS, f_ctl, 0);
432 lport->tt.seq_send(lport, sp, fp);
433 }
434 fc_frame_free(in_fp);
435 }
436
437 /**
438 * fc_lport_recv_rnid_req() - Handle received Request Node ID data request
439 * @sp: The sequence in the RNID exchange
440 * @in_fp: The RNID request frame
441 * @lport: The local port receiving the RNID
442 *
443 * Locking Note: The lport lock is expected to be held before calling
444 * this function.
445 */
446 static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
447 struct fc_lport *lport)
448 {
449 struct fc_frame *fp;
450 struct fc_exch *ep = fc_seq_exch(sp);
451 struct fc_els_rnid *req;
452 struct {
453 struct fc_els_rnid_resp rnid;
454 struct fc_els_rnid_cid cid;
455 struct fc_els_rnid_gen gen;
456 } *rp;
457 struct fc_seq_els_data rjt_data;
458 u8 fmt;
459 size_t len;
460 u32 f_ctl;
461
462 FC_LPORT_DBG(lport, "Received RNID request while in state %s\n",
463 fc_lport_state(lport));
464
465 req = fc_frame_payload_get(in_fp, sizeof(*req));
466 if (!req) {
467 rjt_data.fp = NULL;
468 rjt_data.reason = ELS_RJT_LOGIC;
469 rjt_data.explan = ELS_EXPL_NONE;
470 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
471 } else {
472 fmt = req->rnid_fmt;
473 len = sizeof(*rp);
474 if (fmt != ELS_RNIDF_GEN ||
475 ntohl(lport->rnid_gen.rnid_atype) == 0) {
476 fmt = ELS_RNIDF_NONE; /* nothing to provide */
477 len -= sizeof(rp->gen);
478 }
479 fp = fc_frame_alloc(lport, len);
480 if (fp) {
481 rp = fc_frame_payload_get(fp, len);
482 memset(rp, 0, len);
483 rp->rnid.rnid_cmd = ELS_LS_ACC;
484 rp->rnid.rnid_fmt = fmt;
485 rp->rnid.rnid_cid_len = sizeof(rp->cid);
486 rp->cid.rnid_wwpn = htonll(lport->wwpn);
487 rp->cid.rnid_wwnn = htonll(lport->wwnn);
488 if (fmt == ELS_RNIDF_GEN) {
489 rp->rnid.rnid_sid_len = sizeof(rp->gen);
490 memcpy(&rp->gen, &lport->rnid_gen,
491 sizeof(rp->gen));
492 }
493 sp = lport->tt.seq_start_next(sp);
494 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
495 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
496 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
497 FC_TYPE_ELS, f_ctl, 0);
498 lport->tt.seq_send(lport, sp, fp);
499 }
500 }
501 fc_frame_free(in_fp);
502 }
503
504 /**
505 * fc_lport_recv_logo_req() - Handle received fabric LOGO request
506 * @sp: The sequence in the LOGO exchange
507 * @fp: The LOGO request frame
508 * @lport: The local port receiving the LOGO
509 *
510 * Locking Note: The lport lock is expected to be held before calling
511 * this function.
512 */
513 static void fc_lport_recv_logo_req(struct fc_seq *sp, struct fc_frame *fp,
514 struct fc_lport *lport)
515 {
516 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
517 fc_lport_enter_reset(lport);
518 fc_frame_free(fp);
519 }
520
521 /**
522 * fc_fabric_login() - Start the lport state machine
523 * @lport: The local port that should log into the fabric
524 *
525 * Locking Note: This function should not be called
526 * with the lport lock held.
527 */
528 int fc_fabric_login(struct fc_lport *lport)
529 {
530 int rc = -1;
531
532 mutex_lock(&lport->lp_mutex);
533 if (lport->state == LPORT_ST_DISABLED ||
534 lport->state == LPORT_ST_LOGO) {
535 fc_lport_state_enter(lport, LPORT_ST_RESET);
536 fc_lport_enter_reset(lport);
537 rc = 0;
538 }
539 mutex_unlock(&lport->lp_mutex);
540
541 return rc;
542 }
543 EXPORT_SYMBOL(fc_fabric_login);
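/*
 * Usage sketch (assumption: a typical FCoE-style LLD; exact ordering is
 * up to the driver): the lport is configured and initialized first, the
 * fabric login state machine is then armed, and a later link-up event
 * actually starts FLOGI.
 *
 *	fc_lport_config(lport);
 *	fc_lport_init(lport);
 *	// ... transport and exchange manager setup ...
 *	fc_fabric_login(lport);		// moves DISABLED/LOGO -> RESET
 *	fc_linkup(lport);		// on link up, RESET -> FLOGI
 */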
544
545 /**
546 * __fc_linkup() - Handler for transport linkup events
547 * @lport: The lport whose link is up
548 *
549 * Locking: must be called with the lp_mutex held
550 */
551 void __fc_linkup(struct fc_lport *lport)
552 {
553 if (!lport->link_up) {
554 lport->link_up = 1;
555
556 if (lport->state == LPORT_ST_RESET)
557 fc_lport_enter_flogi(lport);
558 }
559 }
560
561 /**
562 * fc_linkup() - Handler for transport linkup events
563 * @lport: The local port whose link is up
564 */
565 void fc_linkup(struct fc_lport *lport)
566 {
567 printk(KERN_INFO "host%d: libfc: Link up on port (%6.6x)\n",
568 lport->host->host_no, lport->port_id);
569
570 mutex_lock(&lport->lp_mutex);
571 __fc_linkup(lport);
572 mutex_unlock(&lport->lp_mutex);
573 }
574 EXPORT_SYMBOL(fc_linkup);
575
576 /**
577 * __fc_linkdown() - Handler for transport linkdown events
578 * @lport: The lport whose link is down
579 *
580 * Locking: must be called with the lp_mutex held
581 */
582 void __fc_linkdown(struct fc_lport *lport)
583 {
584 if (lport->link_up) {
585 lport->link_up = 0;
586 fc_lport_enter_reset(lport);
587 lport->tt.fcp_cleanup(lport);
588 }
589 }
590
591 /**
592 * fc_linkdown() - Handler for transport linkdown events
593 * @lport: The local port whose link is down
594 */
595 void fc_linkdown(struct fc_lport *lport)
596 {
597 printk(KERN_INFO "host%d: libfc: Link down on port (%6.6x)\n",
598 lport->host->host_no, lport->port_id);
599
600 mutex_lock(&lport->lp_mutex);
601 __fc_linkdown(lport);
602 mutex_unlock(&lport->lp_mutex);
603 }
604 EXPORT_SYMBOL(fc_linkdown);
605
606 /**
607 * fc_fabric_logoff() - Logout of the fabric
608 * @lport: The local port to logoff the fabric
609 *
610 * Return value:
611 * 0 for success, -1 for failure
612 */
613 int fc_fabric_logoff(struct fc_lport *lport)
614 {
615 lport->tt.disc_stop_final(lport);
616 mutex_lock(&lport->lp_mutex);
617 if (lport->dns_rdata)
618 lport->tt.rport_logoff(lport->dns_rdata);
619 mutex_unlock(&lport->lp_mutex);
620 lport->tt.rport_flush_queue();
621 mutex_lock(&lport->lp_mutex);
622 fc_lport_enter_logo(lport);
623 mutex_unlock(&lport->lp_mutex);
624 cancel_delayed_work_sync(&lport->retry_work);
625 return 0;
626 }
627 EXPORT_SYMBOL(fc_fabric_logoff);
628
629 /**
630 * fc_lport_destroy() - Unregister a fc_lport
631 * @lport: The local port to unregister
632 *
633 * Note:
634 * exit routine for an fc_lport instance:
635 * cleans up all the allocated memory
636 * and frees up other system resources.
637 *
638 */
639 int fc_lport_destroy(struct fc_lport *lport)
640 {
641 mutex_lock(&lport->lp_mutex);
642 lport->state = LPORT_ST_DISABLED;
643 lport->link_up = 0;
644 lport->tt.frame_send = fc_frame_drop;
645 mutex_unlock(&lport->lp_mutex);
646
647 lport->tt.fcp_abort_io(lport);
648 lport->tt.disc_stop_final(lport);
649 lport->tt.exch_mgr_reset(lport, 0, 0);
650 return 0;
651 }
652 EXPORT_SYMBOL(fc_lport_destroy);
653
654 /**
655 * fc_set_mfs() - Set the maximum frame size for a local port
656 * @lport: The local port to set the MFS for
657 * @mfs: The new MFS
658 */
659 int fc_set_mfs(struct fc_lport *lport, u32 mfs)
660 {
661 unsigned int old_mfs;
662 int rc = -EINVAL;
663
664 mutex_lock(&lport->lp_mutex);
665
666 old_mfs = lport->mfs;
667
668 if (mfs >= FC_MIN_MAX_FRAME) {
669 mfs &= ~3;
670 if (mfs > FC_MAX_FRAME)
671 mfs = FC_MAX_FRAME;
672 mfs -= sizeof(struct fc_frame_header);
673 lport->mfs = mfs;
674 rc = 0;
675 }
676
677 if (!rc && mfs < old_mfs)
678 fc_lport_enter_reset(lport);
679
680 mutex_unlock(&lport->lp_mutex);
681
682 return rc;
683 }
684 EXPORT_SYMBOL(fc_set_mfs);
685
686 /**
687 * fc_lport_disc_callback() - Callback for discovery events
688 * @lport: The local port receiving the event
689 * @event: The discovery event
690 */
691 void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
692 {
693 switch (event) {
694 case DISC_EV_SUCCESS:
695 FC_LPORT_DBG(lport, "Discovery succeeded\n");
696 break;
697 case DISC_EV_FAILED:
698 printk(KERN_ERR "host%d: libfc: "
699 "Discovery failed for port (%6.6x)\n",
700 lport->host->host_no, lport->port_id);
701 mutex_lock(&lport->lp_mutex);
702 fc_lport_enter_reset(lport);
703 mutex_unlock(&lport->lp_mutex);
704 break;
705 case DISC_EV_NONE:
706 WARN_ON(1);
707 break;
708 }
709 }
710
711 /**
712 * fc_lport_enter_ready() - Enter the ready state and start discovery
713 * @lport: The local port that is ready
714 *
715 * Locking Note: The lport lock is expected to be held before calling
716 * this routine.
717 */
718 static void fc_lport_enter_ready(struct fc_lport *lport)
719 {
720 FC_LPORT_DBG(lport, "Entered READY from state %s\n",
721 fc_lport_state(lport));
722
723 fc_lport_state_enter(lport, LPORT_ST_READY);
724 if (lport->vport)
725 fc_vport_set_state(lport->vport, FC_VPORT_ACTIVE);
726 fc_vports_linkchange(lport);
727
728 if (!lport->ptp_rdata)
729 lport->tt.disc_start(fc_lport_disc_callback, lport);
730 }
731
732 /**
733 * fc_lport_set_port_id() - set the local port Port ID
734 * @lport: The local port which will have its Port ID set.
735 * @port_id: The new port ID.
736 * @fp: The frame containing the incoming request, or NULL.
737 *
738 * Locking Note: The lport lock is expected to be held before calling
739 * this function.
740 */
741 static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id,
742 struct fc_frame *fp)
743 {
744 if (port_id)
745 printk(KERN_INFO "host%d: Assigned Port ID %6.6x\n",
746 lport->host->host_no, port_id);
747
748 lport->port_id = port_id;
749
750 /* Update the fc_host */
751 fc_host_port_id(lport->host) = port_id;
752
753 if (lport->tt.lport_set_port_id)
754 lport->tt.lport_set_port_id(lport, port_id, fp);
755 }
756
757 /**
758 * fc_lport_set_local_id() - Set the local port Port ID for point-to-multipoint
759 * @lport: The local port which will have its Port ID set.
760 * @port_id: The new port ID.
761 *
762 * Called by the lower-level driver when the transport sets the local port_id.
763 * This is used in VN_port to VN_port mode for FCoE, and causes FLOGI and
764 * discovery to be skipped.
765 */
766 void fc_lport_set_local_id(struct fc_lport *lport, u32 port_id)
767 {
768 mutex_lock(&lport->lp_mutex);
769
770 fc_lport_set_port_id(lport, port_id, NULL);
771
772 switch (lport->state) {
773 case LPORT_ST_RESET:
774 case LPORT_ST_FLOGI:
775 if (port_id)
776 fc_lport_enter_ready(lport);
777 break;
778 default:
779 break;
780 }
781 mutex_unlock(&lport->lp_mutex);
782 }
783 EXPORT_SYMBOL(fc_lport_set_local_id);
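/*
 * Usage sketch for VN_port-to-VN_port (VN2VN) mode: the FCoE controller
 * code, not a fabric FLOGI, chooses the local Port ID and pushes it down
 * here.  The variable name below is hypothetical; the effect is that a
 * non-zero ID set while the lport is in RESET or FLOGI takes the lport
 * straight to READY, with FLOGI and discovery skipped.
 *
 *	// vn2vn_chosen_id obtained via the FIP VN2VN protocol (assumption)
 *	fc_lport_set_local_id(lport, vn2vn_chosen_id);
 */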
784
785 /**
786 * fc_lport_recv_flogi_req() - Receive a FLOGI request
787 * @sp_in: The sequence the FLOGI is on
788 * @rx_fp: The FLOGI frame
789 * @lport: The local port that received the request
790 *
791 * A received FLOGI request indicates a point-to-point connection.
792 * Accept it with the common service parameters indicating our N port.
793 * Set up to do a PLOGI if we have the higher-number WWPN.
794 *
795 * Locking Note: The lport lock is expected to be held before calling
796 * this function.
797 */
798 static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
799 struct fc_frame *rx_fp,
800 struct fc_lport *lport)
801 {
802 struct fc_frame *fp;
803 struct fc_frame_header *fh;
804 struct fc_seq *sp;
805 struct fc_exch *ep;
806 struct fc_els_flogi *flp;
807 struct fc_els_flogi *new_flp;
808 u64 remote_wwpn;
809 u32 remote_fid;
810 u32 local_fid;
811 u32 f_ctl;
812
813 FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n",
814 fc_lport_state(lport));
815
816 fh = fc_frame_header_get(rx_fp);
817 remote_fid = ntoh24(fh->fh_s_id);
818 flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
819 if (!flp)
820 goto out;
821 remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
822 if (remote_wwpn == lport->wwpn) {
823 printk(KERN_WARNING "host%d: libfc: Received FLOGI from port "
824 "with same WWPN %16.16llx\n",
825 lport->host->host_no, remote_wwpn);
826 goto out;
827 }
828 FC_LPORT_DBG(lport, "FLOGI from port WWPN %16.16llx\n", remote_wwpn);
829
830 /*
831 * XXX what is the right thing to do for FIDs?
832 * The originator might expect our S_ID to be 0xfffffe.
833 * But if so, both of us could end up with the same FID.
834 */
835 local_fid = FC_LOCAL_PTP_FID_LO;
836 if (remote_wwpn < lport->wwpn) {
837 local_fid = FC_LOCAL_PTP_FID_HI;
838 if (!remote_fid || remote_fid == local_fid)
839 remote_fid = FC_LOCAL_PTP_FID_LO;
840 } else if (!remote_fid) {
841 remote_fid = FC_LOCAL_PTP_FID_HI;
842 }
843
844 fc_lport_set_port_id(lport, local_fid, rx_fp);
845
846 fp = fc_frame_alloc(lport, sizeof(*flp));
847 if (fp) {
848 sp = lport->tt.seq_start_next(fr_seq(rx_fp));
849 new_flp = fc_frame_payload_get(fp, sizeof(*flp));
850 fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI);
851 new_flp->fl_cmd = (u8) ELS_LS_ACC;
852
853 /*
854 * Send the response. If this fails, the originator should
855 * repeat the sequence.
856 */
857 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
858 ep = fc_seq_exch(sp);
859 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, remote_fid, local_fid,
860 FC_TYPE_ELS, f_ctl, 0);
861 lport->tt.seq_send(lport, sp, fp);
862
863 } else {
864 fc_lport_error(lport, fp);
865 }
866 fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
867 get_unaligned_be64(&flp->fl_wwnn));
868
869 out:
870 sp = fr_seq(rx_fp);
871 fc_frame_free(rx_fp);
872 }
873
874 /**
875 * fc_lport_recv_req() - The generic lport request handler
876 * @lport: The local port that received the request
877 * @sp: The sequence the request is on
878 * @fp: The request frame
879 *
880 * This function will see if the lport handles the request or
881 * if an rport should handle the request.
882 *
883 * Locking Note: This function should not be called with the lport
884 * lock held because it will grab the lock.
885 */
886 static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
887 struct fc_frame *fp)
888 {
889 struct fc_frame_header *fh = fc_frame_header_get(fp);
890 void (*recv) (struct fc_seq *, struct fc_frame *, struct fc_lport *);
891
892 mutex_lock(&lport->lp_mutex);
893
894 /*
895 * Handle special ELS cases like FLOGI, LOGO, and
896 * RSCN here. These don't require a session.
897 * Even if we had a session, it might not be ready.
898 */
899 if (!lport->link_up)
900 fc_frame_free(fp);
901 else if (fh->fh_type == FC_TYPE_ELS &&
902 fh->fh_r_ctl == FC_RCTL_ELS_REQ) {
903 /*
904 * Check opcode.
905 */
906 recv = lport->tt.rport_recv_req;
907 switch (fc_frame_payload_op(fp)) {
908 case ELS_FLOGI:
909 if (!lport->point_to_multipoint)
910 recv = fc_lport_recv_flogi_req;
911 break;
912 case ELS_LOGO:
913 if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI)
914 recv = fc_lport_recv_logo_req;
915 break;
916 case ELS_RSCN:
917 recv = lport->tt.disc_recv_req;
918 break;
919 case ELS_ECHO:
920 recv = fc_lport_recv_echo_req;
921 break;
922 case ELS_RLIR:
923 recv = fc_lport_recv_rlir_req;
924 break;
925 case ELS_RNID:
926 recv = fc_lport_recv_rnid_req;
927 break;
928 }
929
930 recv(sp, fp, lport);
931 } else {
932 FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n",
933 fr_eof(fp));
934 fc_frame_free(fp);
935 }
936 mutex_unlock(&lport->lp_mutex);
937
938 /*
939	 * The common exch_done for all requests may not be good
940	 * if any request requires a longer hold on the exchange. XXX
941 */
942 lport->tt.exch_done(sp);
943 }
944
945 /**
946 * fc_lport_reset() - Reset a local port
947 * @lport: The local port which should be reset
948 *
949 * Locking Note: This function should not be called with the
950 * lport lock held.
951 */
952 int fc_lport_reset(struct fc_lport *lport)
953 {
954 cancel_delayed_work_sync(&lport->retry_work);
955 mutex_lock(&lport->lp_mutex);
956 fc_lport_enter_reset(lport);
957 mutex_unlock(&lport->lp_mutex);
958 return 0;
959 }
960 EXPORT_SYMBOL(fc_lport_reset);
961
962 /**
963 * fc_lport_reset_locked() - Reset the local port w/ the lport lock held
964 * @lport: The local port to be reset
965 *
966 * Locking Note: The lport lock is expected to be held before calling
967 * this routine.
968 */
969 static void fc_lport_reset_locked(struct fc_lport *lport)
970 {
971 if (lport->dns_rdata)
972 lport->tt.rport_logoff(lport->dns_rdata);
973
974 if (lport->ptp_rdata) {
975 lport->tt.rport_logoff(lport->ptp_rdata);
976 kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
977 lport->ptp_rdata = NULL;
978 }
979
980 lport->tt.disc_stop(lport);
981
982 lport->tt.exch_mgr_reset(lport, 0, 0);
983 fc_host_fabric_name(lport->host) = 0;
984
985 if (lport->port_id && (!lport->point_to_multipoint || !lport->link_up))
986 fc_lport_set_port_id(lport, 0, NULL);
987 }
988
989 /**
990 * fc_lport_enter_reset() - Reset the local port
991 * @lport: The local port to be reset
992 *
993 * Locking Note: The lport lock is expected to be held before calling
994 * this routine.
995 */
996 static void fc_lport_enter_reset(struct fc_lport *lport)
997 {
998 FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
999 fc_lport_state(lport));
1000
1001 if (lport->state == LPORT_ST_DISABLED || lport->state == LPORT_ST_LOGO)
1002 return;
1003
1004 if (lport->vport) {
1005 if (lport->link_up)
1006 fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING);
1007 else
1008 fc_vport_set_state(lport->vport, FC_VPORT_LINKDOWN);
1009 }
1010 fc_lport_state_enter(lport, LPORT_ST_RESET);
1011 fc_vports_linkchange(lport);
1012 fc_lport_reset_locked(lport);
1013 if (lport->link_up)
1014 fc_lport_enter_flogi(lport);
1015 }
1016
1017 /**
1018 * fc_lport_enter_disabled() - Disable the local port
1019 * @lport: The local port to be reset
1020 *
1021 * Locking Note: The lport lock is expected to be held before calling
1022 * this routine.
1023 */
1024 static void fc_lport_enter_disabled(struct fc_lport *lport)
1025 {
1026 FC_LPORT_DBG(lport, "Entered disabled state from %s state\n",
1027 fc_lport_state(lport));
1028
1029 fc_lport_state_enter(lport, LPORT_ST_DISABLED);
1030 fc_vports_linkchange(lport);
1031 fc_lport_reset_locked(lport);
1032 }
1033
1034 /**
1035 * fc_lport_error() - Handler for any errors
1036 * @lport: The local port that the error was on
1037 * @fp: The error code encoded in a frame pointer
1038 *
1039 * If the error was caused by a resource allocation failure
1040 * then wait for half a second and retry, otherwise retry
1041 * after the e_d_tov time.
1042 */
1043 static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
1044 {
1045 unsigned long delay = 0;
1046 FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n",
1047 PTR_ERR(fp), fc_lport_state(lport),
1048 lport->retry_count);
1049
1050 if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
1051 /*
1052 * Memory allocation failure, or the exchange timed out.
1053 * Retry after delay
1054 */
1055 if (lport->retry_count < lport->max_retry_count) {
1056 lport->retry_count++;
1057 if (!fp)
1058 delay = msecs_to_jiffies(500);
1059 else
1060 delay = msecs_to_jiffies(lport->e_d_tov);
1061
1062 schedule_delayed_work(&lport->retry_work, delay);
1063 } else {
1064 switch (lport->state) {
1065 case LPORT_ST_DISABLED:
1066 case LPORT_ST_READY:
1067 case LPORT_ST_RESET:
1068 case LPORT_ST_RNN_ID:
1069 case LPORT_ST_RSNN_NN:
1070 case LPORT_ST_RSPN_ID:
1071 case LPORT_ST_RFT_ID:
1072 case LPORT_ST_RFF_ID:
1073 case LPORT_ST_SCR:
1074 case LPORT_ST_DNS:
1075 case LPORT_ST_FLOGI:
1076 case LPORT_ST_LOGO:
1077 fc_lport_enter_reset(lport);
1078 break;
1079 }
1080 }
1081 }
1082 }
1083
1084 /**
1085 * fc_lport_ns_resp() - Handle response to a name server
1086 * registration exchange
1087 * @sp: current sequence in exchange
1088 * @fp: response frame
1089 * @lp_arg: Fibre Channel host port instance
1090 *
1091 * Locking Note: This function will be called without the lport lock
1092 * held, but it will lock, call an _enter_* function or fc_lport_error()
1093 * and then unlock the lport.
1094 */
1095 static void fc_lport_ns_resp(struct fc_seq *sp, struct fc_frame *fp,
1096 void *lp_arg)
1097 {
1098 struct fc_lport *lport = lp_arg;
1099 struct fc_frame_header *fh;
1100 struct fc_ct_hdr *ct;
1101
1102 FC_LPORT_DBG(lport, "Received a ns %s\n", fc_els_resp_type(fp));
1103
1104 if (fp == ERR_PTR(-FC_EX_CLOSED))
1105 return;
1106
1107 mutex_lock(&lport->lp_mutex);
1108
1109 if (lport->state < LPORT_ST_RNN_ID || lport->state > LPORT_ST_RFF_ID) {
1110 FC_LPORT_DBG(lport, "Received a name server response, "
1111 "but in state %s\n", fc_lport_state(lport));
1112 if (IS_ERR(fp))
1113 goto err;
1114 goto out;
1115 }
1116
1117 if (IS_ERR(fp)) {
1118 fc_lport_error(lport, fp);
1119 goto err;
1120 }
1121
1122 fh = fc_frame_header_get(fp);
1123 ct = fc_frame_payload_get(fp, sizeof(*ct));
1124
1125 if (fh && ct && fh->fh_type == FC_TYPE_CT &&
1126 ct->ct_fs_type == FC_FST_DIR &&
1127 ct->ct_fs_subtype == FC_NS_SUBTYPE &&
1128 ntohs(ct->ct_cmd) == FC_FS_ACC)
1129 switch (lport->state) {
1130 case LPORT_ST_RNN_ID:
1131 fc_lport_enter_ns(lport, LPORT_ST_RSNN_NN);
1132 break;
1133 case LPORT_ST_RSNN_NN:
1134 fc_lport_enter_ns(lport, LPORT_ST_RSPN_ID);
1135 break;
1136 case LPORT_ST_RSPN_ID:
1137 fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
1138 break;
1139 case LPORT_ST_RFT_ID:
1140 fc_lport_enter_ns(lport, LPORT_ST_RFF_ID);
1141 break;
1142 case LPORT_ST_RFF_ID:
1143 fc_lport_enter_scr(lport);
1144 break;
1145 default:
1146 /* should have already been caught by state checks */
1147 break;
1148 }
1149 else
1150 fc_lport_error(lport, fp);
1151 out:
1152 fc_frame_free(fp);
1153 err:
1154 mutex_unlock(&lport->lp_mutex);
1155 }
1156
1157 /**
1158 * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request
1159 * @sp: current sequence in SCR exchange
1160 * @fp: response frame
1161 * @lp_arg: Fibre Channel local port instance that sent the registration request
1162 *
1163 * Locking Note: This function will be called without the lport lock
1164 * held, but it will lock, call an _enter_* function or fc_lport_error
1165 * and then unlock the lport.
1166 */
1167 static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
1168 void *lp_arg)
1169 {
1170 struct fc_lport *lport = lp_arg;
1171 u8 op;
1172
1173 FC_LPORT_DBG(lport, "Received a SCR %s\n", fc_els_resp_type(fp));
1174
1175 if (fp == ERR_PTR(-FC_EX_CLOSED))
1176 return;
1177
1178 mutex_lock(&lport->lp_mutex);
1179
1180 if (lport->state != LPORT_ST_SCR) {
1181 FC_LPORT_DBG(lport, "Received a SCR response, but in state "
1182 "%s\n", fc_lport_state(lport));
1183 if (IS_ERR(fp))
1184 goto err;
1185 goto out;
1186 }
1187
1188 if (IS_ERR(fp)) {
1189 fc_lport_error(lport, fp);
1190 goto err;
1191 }
1192
1193 op = fc_frame_payload_op(fp);
1194 if (op == ELS_LS_ACC)
1195 fc_lport_enter_ready(lport);
1196 else
1197 fc_lport_error(lport, fp);
1198
1199 out:
1200 fc_frame_free(fp);
1201 err:
1202 mutex_unlock(&lport->lp_mutex);
1203 }
1204
1205 /**
1206 * fc_lport_enter_scr() - Send a SCR (State Change Register) request
1207 * @lport: The local port to register for state changes
1208 *
1209 * Locking Note: The lport lock is expected to be held before calling
1210 * this routine.
1211 */
1212 static void fc_lport_enter_scr(struct fc_lport *lport)
1213 {
1214 struct fc_frame *fp;
1215
1216 FC_LPORT_DBG(lport, "Entered SCR state from %s state\n",
1217 fc_lport_state(lport));
1218
1219 fc_lport_state_enter(lport, LPORT_ST_SCR);
1220
1221 fp = fc_frame_alloc(lport, sizeof(struct fc_els_scr));
1222 if (!fp) {
1223 fc_lport_error(lport, fp);
1224 return;
1225 }
1226
1227 if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR,
1228 fc_lport_scr_resp, lport,
1229 2 * lport->r_a_tov))
1230 fc_lport_error(lport, NULL);
1231 }
1232
1233 /**
1234 * fc_lport_enter_ns() - Register an object with the name server
1235 * @lport: Fibre Channel local port to register
 * @state: The name server registration state to enter
1236 *
1237 * Locking Note: The lport lock is expected to be held before calling
1238 * this routine.
1239 */
1240 static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state)
1241 {
1242 struct fc_frame *fp;
1243 enum fc_ns_req cmd;
1244 int size = sizeof(struct fc_ct_hdr);
1245 size_t len;
1246
1247 FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
1248 fc_lport_state_names[state],
1249 fc_lport_state(lport));
1250
1251 fc_lport_state_enter(lport, state);
1252
1253 switch (state) {
1254 case LPORT_ST_RNN_ID:
1255 cmd = FC_NS_RNN_ID;
1256 size += sizeof(struct fc_ns_rn_id);
1257 break;
1258 case LPORT_ST_RSNN_NN:
1259 len = strnlen(fc_host_symbolic_name(lport->host), 255);
1260 /* if there is no symbolic name, skip to RFT_ID */
1261 if (!len)
1262 return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
1263 cmd = FC_NS_RSNN_NN;
1264 size += sizeof(struct fc_ns_rsnn) + len;
1265 break;
1266 case LPORT_ST_RSPN_ID:
1267 len = strnlen(fc_host_symbolic_name(lport->host), 255);
1268 /* if there is no symbolic name, skip to RFT_ID */
1269 if (!len)
1270 return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
1271 cmd = FC_NS_RSPN_ID;
1272 size += sizeof(struct fc_ns_rspn) + len;
1273 break;
1274 case LPORT_ST_RFT_ID:
1275 cmd = FC_NS_RFT_ID;
1276 size += sizeof(struct fc_ns_rft);
1277 break;
1278 case LPORT_ST_RFF_ID:
1279 cmd = FC_NS_RFF_ID;
1280 size += sizeof(struct fc_ns_rff_id);
1281 break;
1282 default:
1283 fc_lport_error(lport, NULL);
1284 return;
1285 }
1286
1287 fp = fc_frame_alloc(lport, size);
1288 if (!fp) {
1289 fc_lport_error(lport, fp);
1290 return;
1291 }
1292
1293 if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, cmd,
1294 fc_lport_ns_resp,
1295 lport, 3 * lport->r_a_tov))
1296 fc_lport_error(lport, fp);
1297 }
1298
1299 static struct fc_rport_operations fc_lport_rport_ops = {
1300 .event_callback = fc_lport_rport_callback,
1301 };
1302
1303 /**
1304 * fc_lport_enter_dns() - Create a fc_rport for the name server
1305 * @lport: The local port requesting a remote port for the name server
1306 *
1307 * Locking Note: The lport lock is expected to be held before calling
1308 * this routine.
1309 */
1310 static void fc_lport_enter_dns(struct fc_lport *lport)
1311 {
1312 struct fc_rport_priv *rdata;
1313
1314 FC_LPORT_DBG(lport, "Entered DNS state from %s state\n",
1315 fc_lport_state(lport));
1316
1317 fc_lport_state_enter(lport, LPORT_ST_DNS);
1318
1319 mutex_lock(&lport->disc.disc_mutex);
1320 rdata = lport->tt.rport_create(lport, FC_FID_DIR_SERV);
1321 mutex_unlock(&lport->disc.disc_mutex);
1322 if (!rdata)
1323 goto err;
1324
1325 rdata->ops = &fc_lport_rport_ops;
1326 lport->tt.rport_login(rdata);
1327 return;
1328
1329 err:
1330 fc_lport_error(lport, NULL);
1331 }
1332
1333 /**
1334 * fc_lport_timeout() - Handler for the retry_work timer
1335 * @work: The work struct of the local port
1336 */
1337 static void fc_lport_timeout(struct work_struct *work)
1338 {
1339 struct fc_lport *lport =
1340 container_of(work, struct fc_lport,
1341 retry_work.work);
1342
1343 mutex_lock(&lport->lp_mutex);
1344
1345 switch (lport->state) {
1346 case LPORT_ST_DISABLED:
1347 WARN_ON(1);
1348 break;
1349 case LPORT_ST_READY:
1350 WARN_ON(1);
1351 break;
1352 case LPORT_ST_RESET:
1353 break;
1354 case LPORT_ST_FLOGI:
1355 fc_lport_enter_flogi(lport);
1356 break;
1357 case LPORT_ST_DNS:
1358 fc_lport_enter_dns(lport);
1359 break;
1360 case LPORT_ST_RNN_ID:
1361 case LPORT_ST_RSNN_NN:
1362 case LPORT_ST_RSPN_ID:
1363 case LPORT_ST_RFT_ID:
1364 case LPORT_ST_RFF_ID:
1365 fc_lport_enter_ns(lport, lport->state);
1366 break;
1367 case LPORT_ST_SCR:
1368 fc_lport_enter_scr(lport);
1369 break;
1370 case LPORT_ST_LOGO:
1371 fc_lport_enter_logo(lport);
1372 break;
1373 }
1374
1375 mutex_unlock(&lport->lp_mutex);
1376 }
1377
1378 /**
1379 * fc_lport_logo_resp() - Handle response to LOGO request
1380 * @sp: The sequence that the LOGO was on
1381 * @fp: The LOGO response frame
1382 * @lp_arg: The local port that received the LOGO response
1383 *
1384 * Locking Note: This function will be called without the lport lock
1385 * held, but it will lock, call an _enter_* function or fc_lport_error()
1386 * and then unlock the lport.
1387 */
1388 void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
1389 void *lp_arg)
1390 {
1391 struct fc_lport *lport = lp_arg;
1392 u8 op;
1393
1394 FC_LPORT_DBG(lport, "Received a LOGO %s\n", fc_els_resp_type(fp));
1395
1396 if (fp == ERR_PTR(-FC_EX_CLOSED))
1397 return;
1398
1399 mutex_lock(&lport->lp_mutex);
1400
1401 if (lport->state != LPORT_ST_LOGO) {
1402 FC_LPORT_DBG(lport, "Received a LOGO response, but in state "
1403 "%s\n", fc_lport_state(lport));
1404 if (IS_ERR(fp))
1405 goto err;
1406 goto out;
1407 }
1408
1409 if (IS_ERR(fp)) {
1410 fc_lport_error(lport, fp);
1411 goto err;
1412 }
1413
1414 op = fc_frame_payload_op(fp);
1415 if (op == ELS_LS_ACC)
1416 fc_lport_enter_disabled(lport);
1417 else
1418 fc_lport_error(lport, fp);
1419
1420 out:
1421 fc_frame_free(fp);
1422 err:
1423 mutex_unlock(&lport->lp_mutex);
1424 }
1425 EXPORT_SYMBOL(fc_lport_logo_resp);
1426
1427 /**
1428 * fc_lport_enter_logo() - Logout of the fabric
1429 * @lport: The local port to be logged out
1430 *
1431 * Locking Note: The lport lock is expected to be held before calling
1432 * this routine.
1433 */
1434 static void fc_lport_enter_logo(struct fc_lport *lport)
1435 {
1436 struct fc_frame *fp;
1437 struct fc_els_logo *logo;
1438
1439 FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n",
1440 fc_lport_state(lport));
1441
1442 fc_lport_state_enter(lport, LPORT_ST_LOGO);
1443 fc_vports_linkchange(lport);
1444
1445 fp = fc_frame_alloc(lport, sizeof(*logo));
1446 if (!fp) {
1447 fc_lport_error(lport, fp);
1448 return;
1449 }
1450
1451 if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO,
1452 fc_lport_logo_resp, lport,
1453 2 * lport->r_a_tov))
1454 fc_lport_error(lport, NULL);
1455 }
1456
1457 /**
1458 * fc_lport_flogi_resp() - Handle response to FLOGI request
1459 * @sp: The sequence that the FLOGI was on
1460 * @fp: The FLOGI response frame
1461 * @lp_arg: The local port that received the FLOGI response
1462 *
1463 * Locking Note: This function will be called without the lport lock
1464 * held, but it will lock, call an _enter_* function or fc_lport_error()
1465 * and then unlock the lport.
1466 */
1467 void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1468 void *lp_arg)
1469 {
1470 struct fc_lport *lport = lp_arg;
1471 struct fc_frame_header *fh;
1472 struct fc_els_flogi *flp;
1473 u32 did;
1474 u16 csp_flags;
1475 unsigned int r_a_tov;
1476 unsigned int e_d_tov;
1477 u16 mfs;
1478
1479 FC_LPORT_DBG(lport, "Received a FLOGI %s\n", fc_els_resp_type(fp));
1480
1481 if (fp == ERR_PTR(-FC_EX_CLOSED))
1482 return;
1483
1484 mutex_lock(&lport->lp_mutex);
1485
1486 if (lport->state != LPORT_ST_FLOGI) {
1487 FC_LPORT_DBG(lport, "Received a FLOGI response, but in state "
1488 "%s\n", fc_lport_state(lport));
1489 if (IS_ERR(fp))
1490 goto err;
1491 goto out;
1492 }
1493
1494 if (IS_ERR(fp)) {
1495 fc_lport_error(lport, fp);
1496 goto err;
1497 }
1498
1499 fh = fc_frame_header_get(fp);
1500 did = ntoh24(fh->fh_d_id);
1501 if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) {
1502 flp = fc_frame_payload_get(fp, sizeof(*flp));
1503 if (flp) {
1504 mfs = ntohs(flp->fl_csp.sp_bb_data) &
1505 FC_SP_BB_DATA_MASK;
1506 if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
1507 mfs < lport->mfs)
1508 lport->mfs = mfs;
1509 csp_flags = ntohs(flp->fl_csp.sp_features);
1510 r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
1511 e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
1512 if (csp_flags & FC_SP_FT_EDTR)
1513 e_d_tov /= 1000000;
1514
1515 lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC);
1516
1517 if ((csp_flags & FC_SP_FT_FPORT) == 0) {
1518 if (e_d_tov > lport->e_d_tov)
1519 lport->e_d_tov = e_d_tov;
1520 lport->r_a_tov = 2 * e_d_tov;
1521 fc_lport_set_port_id(lport, did, fp);
1522 printk(KERN_INFO "host%d: libfc: "
1523 "Port (%6.6x) entered "
1524 "point-to-point mode\n",
1525 lport->host->host_no, did);
1526 fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id),
1527 get_unaligned_be64(
1528 &flp->fl_wwpn),
1529 get_unaligned_be64(
1530 &flp->fl_wwnn));
1531 } else {
1532 lport->e_d_tov = e_d_tov;
1533 lport->r_a_tov = r_a_tov;
1534 fc_host_fabric_name(lport->host) =
1535 get_unaligned_be64(&flp->fl_wwnn);
1536 fc_lport_set_port_id(lport, did, fp);
1537 fc_lport_enter_dns(lport);
1538 }
1539 }
1540 } else {
1541 FC_LPORT_DBG(lport, "Bad FLOGI response\n");
1542 }
1543
1544 out:
1545 fc_frame_free(fp);
1546 err:
1547 mutex_unlock(&lport->lp_mutex);
1548 }
1549 EXPORT_SYMBOL(fc_lport_flogi_resp);
1550
1551 /**
1552 * fc_lport_enter_flogi() - Send a FLOGI request to the fabric manager
1553 * @lport: Fibre Channel local port to be logged in to the fabric
1554 *
1555 * Locking Note: The lport lock is expected to be held before calling
1556 * this routine.
1557 */
1558 void fc_lport_enter_flogi(struct fc_lport *lport)
1559 {
1560 struct fc_frame *fp;
1561
1562 FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n",
1563 fc_lport_state(lport));
1564
1565 fc_lport_state_enter(lport, LPORT_ST_FLOGI);
1566
1567 if (lport->point_to_multipoint) {
1568 if (lport->port_id)
1569 fc_lport_enter_ready(lport);
1570 return;
1571 }
1572
1573 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
1574 if (!fp)
1575 return fc_lport_error(lport, fp);
1576
1577 if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp,
1578 lport->vport ? ELS_FDISC : ELS_FLOGI,
1579 fc_lport_flogi_resp, lport,
1580 lport->vport ? 2 * lport->r_a_tov :
1581 lport->e_d_tov))
1582 fc_lport_error(lport, NULL);
1583 }
1584
1585 /**
1586 * fc_lport_config() - Configure a fc_lport
1587 * @lport: The local port to be configured
1588 */
1589 int fc_lport_config(struct fc_lport *lport)
1590 {
1591 INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
1592 mutex_init(&lport->lp_mutex);
1593
1594 fc_lport_state_enter(lport, LPORT_ST_DISABLED);
1595
1596 fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
1597 fc_lport_add_fc4_type(lport, FC_TYPE_CT);
1598
1599 return 0;
1600 }
1601 EXPORT_SYMBOL(fc_lport_config);
1602
1603 /**
1604 * fc_lport_init() - Initialize the lport layer for a local port
1605 * @lport: The local port to initialize the lport layer for
1606 */
1607 int fc_lport_init(struct fc_lport *lport)
1608 {
1609 if (!lport->tt.lport_recv)
1610 lport->tt.lport_recv = fc_lport_recv_req;
1611
1612 if (!lport->tt.lport_reset)
1613 lport->tt.lport_reset = fc_lport_reset;
1614
1615 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
1616 fc_host_node_name(lport->host) = lport->wwnn;
1617 fc_host_port_name(lport->host) = lport->wwpn;
1618 fc_host_supported_classes(lport->host) = FC_COS_CLASS3;
1619 memset(fc_host_supported_fc4s(lport->host), 0,
1620 sizeof(fc_host_supported_fc4s(lport->host)));
1621 fc_host_supported_fc4s(lport->host)[2] = 1;
1622 fc_host_supported_fc4s(lport->host)[7] = 1;
1623
1624 /* This value is also unchanging */
1625 memset(fc_host_active_fc4s(lport->host), 0,
1626 sizeof(fc_host_active_fc4s(lport->host)));
1627 fc_host_active_fc4s(lport->host)[2] = 1;
1628 fc_host_active_fc4s(lport->host)[7] = 1;
1629 fc_host_maxframe_size(lport->host) = lport->mfs;
1630 fc_host_supported_speeds(lport->host) = 0;
1631 if (lport->link_supported_speeds & FC_PORTSPEED_1GBIT)
1632 fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
1633 if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
1634 fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
1635
1636 return 0;
1637 }
1638 EXPORT_SYMBOL(fc_lport_init);
1639
1640 /**
1641 * fc_lport_bsg_resp() - The common response handler for FC Passthrough requests
1642 * @sp: The sequence for the FC Passthrough response
1643 * @fp: The response frame
1644 * @info_arg: The BSG info that the response is for
1645 */
1646 static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
1647 void *info_arg)
1648 {
1649 struct fc_bsg_info *info = info_arg;
1650 struct fc_bsg_job *job = info->job;
1651 struct fc_lport *lport = info->lport;
1652 struct fc_frame_header *fh;
1653 size_t len;
1654 void *buf;
1655
1656 if (IS_ERR(fp)) {
1657 job->reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ?
1658 -ECONNABORTED : -ETIMEDOUT;
1659 job->reply_len = sizeof(uint32_t);
1660 job->state_flags |= FC_RQST_STATE_DONE;
1661 job->job_done(job);
1662 kfree(info);
1663 return;
1664 }
1665
1666 mutex_lock(&lport->lp_mutex);
1667 fh = fc_frame_header_get(fp);
1668 len = fr_len(fp) - sizeof(*fh);
1669 buf = fc_frame_payload_get(fp, 0);
1670
1671 if (fr_sof(fp) == FC_SOF_I3 && !ntohs(fh->fh_seq_cnt)) {
1672 /* Get the response code from the first frame payload */
1673 unsigned short cmd = (info->rsp_code == FC_FS_ACC) ?
1674 ntohs(((struct fc_ct_hdr *)buf)->ct_cmd) :
1675 (unsigned short)fc_frame_payload_op(fp);
1676
1677 /* Save the reply status of the job */
1678 job->reply->reply_data.ctels_reply.status =
1679 (cmd == info->rsp_code) ?
1680 FC_CTELS_STATUS_OK : FC_CTELS_STATUS_REJECT;
1681 }
1682
1683 job->reply->reply_payload_rcv_len +=
1684 fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents,
1685 &info->offset, KM_BIO_SRC_IRQ, NULL);
1686
1687 if (fr_eof(fp) == FC_EOF_T &&
1688 (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
1689 (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
1690 if (job->reply->reply_payload_rcv_len >
1691 job->reply_payload.payload_len)
1692 job->reply->reply_payload_rcv_len =
1693 job->reply_payload.payload_len;
1694 job->reply->result = 0;
1695 job->state_flags |= FC_RQST_STATE_DONE;
1696 job->job_done(job);
1697 kfree(info);
1698 }
1699 fc_frame_free(fp);
1700 mutex_unlock(&lport->lp_mutex);
1701 }
1702
1703 /**
1704 * fc_lport_els_request() - Send ELS passthrough request
1705 * @job: The BSG Passthrough job
1706 * @lport: The local port sending the request
1707 * @did: The destination port ID
 * @tov: The timeout period to wait for the response
1708 *
1709 * Locking Note: The lport lock is expected to be held before calling
1710 * this routine.
1711 */
1712 static int fc_lport_els_request(struct fc_bsg_job *job,
1713 struct fc_lport *lport,
1714 u32 did, u32 tov)
1715 {
1716 struct fc_bsg_info *info;
1717 struct fc_frame *fp;
1718 struct fc_frame_header *fh;
1719 char *pp;
1720 int len;
1721
1722 fp = fc_frame_alloc(lport, job->request_payload.payload_len);
1723 if (!fp)
1724 return -ENOMEM;
1725
1726 len = job->request_payload.payload_len;
1727 pp = fc_frame_payload_get(fp, len);
1728
1729 sg_copy_to_buffer(job->request_payload.sg_list,
1730 job->request_payload.sg_cnt,
1731 pp, len);
1732
1733 fh = fc_frame_header_get(fp);
1734 fh->fh_r_ctl = FC_RCTL_ELS_REQ;
1735 hton24(fh->fh_d_id, did);
1736 hton24(fh->fh_s_id, lport->port_id);
1737 fh->fh_type = FC_TYPE_ELS;
1738 hton24(fh->fh_f_ctl, FC_FC_FIRST_SEQ |
1739 FC_FC_END_SEQ | FC_FC_SEQ_INIT);
1740 fh->fh_cs_ctl = 0;
1741 fh->fh_df_ctl = 0;
1742 fh->fh_parm_offset = 0;
1743
1744 info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
1745 if (!info) {
1746 fc_frame_free(fp);
1747 return -ENOMEM;
1748 }
1749
1750 info->job = job;
1751 info->lport = lport;
1752 info->rsp_code = ELS_LS_ACC;
1753 info->nents = job->reply_payload.sg_cnt;
1754 info->sg = job->reply_payload.sg_list;
1755
1756 if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
1757 NULL, info, tov))
1758 return -ECOMM;
1759 return 0;
1760 }
1761
1762 /**
1763 * fc_lport_ct_request() - Send CT Passthrough request
1764 * @job: The BSG Passthrough job
1765 * @lport: The local port sending the request
1766 * @did: The destination FC-ID
1767 * @tov: The timeout period to wait for the response
1768 *
1769 * Locking Note: The lport lock is expected to be held before calling
1770 * this routine.
1771 */
1772 static int fc_lport_ct_request(struct fc_bsg_job *job,
1773 struct fc_lport *lport, u32 did, u32 tov)
1774 {
1775 struct fc_bsg_info *info;
1776 struct fc_frame *fp;
1777 struct fc_frame_header *fh;
1778 struct fc_ct_req *ct;
1779 size_t len;
1780
1781 fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
1782 job->request_payload.payload_len);
1783 if (!fp)
1784 return -ENOMEM;
1785
1786 len = job->request_payload.payload_len;
1787 ct = fc_frame_payload_get(fp, len);
1788
1789 sg_copy_to_buffer(job->request_payload.sg_list,
1790 job->request_payload.sg_cnt,
1791 ct, len);
1792
1793 fh = fc_frame_header_get(fp);
1794 fh->fh_r_ctl = FC_RCTL_DD_UNSOL_CTL;
1795 hton24(fh->fh_d_id, did);
1796 hton24(fh->fh_s_id, lport->port_id);
1797 fh->fh_type = FC_TYPE_CT;
1798 hton24(fh->fh_f_ctl, FC_FC_FIRST_SEQ |
1799 FC_FC_END_SEQ | FC_FC_SEQ_INIT);
1800 fh->fh_cs_ctl = 0;
1801 fh->fh_df_ctl = 0;
1802 fh->fh_parm_offset = 0;
1803
1804 info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
1805 if (!info) {
1806 fc_frame_free(fp);
1807 return -ENOMEM;
1808 }
1809
1810 info->job = job;
1811 info->lport = lport;
1812 info->rsp_code = FC_FS_ACC;
1813 info->nents = job->reply_payload.sg_cnt;
1814 info->sg = job->reply_payload.sg_list;
1815
1816 if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
1817 NULL, info, tov))
1818 return -ECOMM;
1819 return 0;
1820 }
1821
1822 /**
1823 * fc_lport_bsg_request() - The common entry point for sending
1824 * FC Passthrough requests
1825 * @job: The BSG passthrough job
1826 */
1827 int fc_lport_bsg_request(struct fc_bsg_job *job)
1828 {
1829 struct request *rsp = job->req->next_rq;
1830 struct Scsi_Host *shost = job->shost;
1831 struct fc_lport *lport = shost_priv(shost);
1832 struct fc_rport *rport;
1833 struct fc_rport_priv *rdata;
1834 int rc = -EINVAL;
1835 u32 did;
1836
1837 job->reply->reply_payload_rcv_len = 0;
1838 if (rsp)
1839 rsp->resid_len = job->reply_payload.payload_len;
1840
1841 mutex_lock(&lport->lp_mutex);
1842
1843 switch (job->request->msgcode) {
1844 case FC_BSG_RPT_ELS:
1845 rport = job->rport;
1846 if (!rport)
1847 break;
1848
1849 rdata = rport->dd_data;
1850 rc = fc_lport_els_request(job, lport, rport->port_id,
1851 rdata->e_d_tov);
1852 break;
1853
1854 case FC_BSG_RPT_CT:
1855 rport = job->rport;
1856 if (!rport)
1857 break;
1858
1859 rdata = rport->dd_data;
1860 rc = fc_lport_ct_request(job, lport, rport->port_id,
1861 rdata->e_d_tov);
1862 break;
1863
1864 case FC_BSG_HST_CT:
1865 did = ntoh24(job->request->rqst_data.h_ct.port_id);
1866 if (did == FC_FID_DIR_SERV)
1867 rdata = lport->dns_rdata;
1868 else
1869 rdata = lport->tt.rport_lookup(lport, did);
1870
1871 if (!rdata)
1872 break;
1873
1874 rc = fc_lport_ct_request(job, lport, did, rdata->e_d_tov);
1875 break;
1876
1877 case FC_BSG_HST_ELS_NOLOGIN:
1878 did = ntoh24(job->request->rqst_data.h_els.port_id);
1879 rc = fc_lport_els_request(job, lport, did, lport->e_d_tov);
1880 break;
1881 }
1882
1883 mutex_unlock(&lport->lp_mutex);
1884 return rc;
1885 }
1886 EXPORT_SYMBOL(fc_lport_bsg_request);