drivers/scsi/libfc/fc_lport.c
1 /*
2 * Copyright(c) 2007 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20 /*
21 * PORT LOCKING NOTES
22 *
23 * These comments only apply to the 'port code' which consists of the lport,
24 * disc and rport blocks.
25 *
26 * MOTIVATION
27 *
28 * The lport, disc and rport blocks all have mutexes that are used to protect
29 * those objects. The main motivation for these locks is to prevent an
30 * lport from being reset just before we send a frame. In that scenario the
31 * lport's FID would get set to zero and then we'd send a frame with an
32 * invalid SID. We also need to ensure that states don't change unexpectedly
33 * while processing another state.
34 *
35 * HIERARCHY
36 *
37 * The following hierarchy defines the locking rules. A greater lock
38 * may be held before acquiring a lesser lock, but a lesser lock should never
39 * be held while attempting to acquire a greater lock. Here is the hierarchy
40 * (an example of the resulting lock ordering follows these notes):
41 * lport > disc, lport > rport, disc > rport
42 *
43 * CALLBACKS
44 *
45 * The callbacks cause complications with this scheme. There is a callback
46 * from the rport (to either lport or disc) and a callback from disc
47 * (to the lport).
48 *
49 * As rports exit the rport state machine a callback is made to the owner of
50 * the rport to notify success or failure. Since the callback is likely to
51 * cause the lport or disc to grab its lock we cannot hold the rport lock
52 * while making the callback. To ensure that the rport is not freed while
53 * processing the callback, the rport callbacks are serialized through a
54 * single-threaded workqueue. An rport would never be freed while in a
55 * callback handler because no other rport work in this queue can be executed
56 * at the same time.
57 *
58 * When discovery succeeds or fails a callback is made to the lport as
59 * notification. Currently, successful discovery causes the lport to take no
60 * action. A failure will cause the lport to reset. There is likely a circular
61 * locking problem with this implementation.
62 */
63
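/*
 * For example (an illustrative sketch only, not code taken from this file),
 * a path that needs both the lport and disc state takes the mutexes in
 * hierarchy order and releases them in the reverse order:
 *
 *	mutex_lock(&lport->lp_mutex);
 *	mutex_lock(&lport->disc.disc_mutex);
 *	... work that needs both the lport and the disc ...
 *	mutex_unlock(&lport->disc.disc_mutex);
 *	mutex_unlock(&lport->lp_mutex);
 */
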
64 /*
65 * LPORT LOCKING
66 *
67 * The critical sections protected by the lport's mutex are quite broad and
68 * may be improved upon in the future. The lport code and its locking don't
69 * influence the I/O path, so excessive locking doesn't penalize I/O
70 * performance.
71 *
72 * The strategy is to lock whenever processing a request or response. Note
73 * that every _enter_* function corresponds to a state change. They generally
74 * change the lport's state and then send a request out on the wire. We lock
75 * before calling any of these functions to protect that state change. This
76 * means that the entry points into the lport block manage the locks while
77 * the state machine transitions between states (i.e. _enter_* functions),
78 * always staying protected (see the example after this comment).
79 *
80 * When handling responses we also hold the lport mutex broadly. When the
81 * lport receives the response frame it locks the mutex and then calls the
82 * appropriate handler for the particular response. Generally a response will
83 * trigger a state change and so the lock must already be held.
84 *
85 * Retries also have to consider the locking. The retries occur from a work
86 * context and the work function will lock the lport and then retry the state
87 * (i.e. _enter_* function).
88 */
89
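/*
 * For example (a sketch of the pattern described above, based on
 * fc_lport_reset() further down in this file), an external entry point
 * takes the lport mutex and then calls the _enter_* function, which
 * changes state and may send a request while the lock is held:
 *
 *	mutex_lock(&lport->lp_mutex);
 *	fc_lport_enter_reset(lport);
 *	mutex_unlock(&lport->lp_mutex);
 */
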
90 #include <linux/timer.h>
91 #include <linux/slab.h>
92 #include <asm/unaligned.h>
93
94 #include <scsi/fc/fc_gs.h>
95
96 #include <scsi/libfc.h>
97 #include <scsi/fc_encode.h>
98 #include <linux/scatterlist.h>
99
100 #include "fc_libfc.h"
101
102 /* Fabric IDs to use for point-to-point mode, chosen on whims. */
103 #define FC_LOCAL_PTP_FID_LO 0x010101
104 #define FC_LOCAL_PTP_FID_HI 0x010102
105
106 #define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds)*/
107
108 static void fc_lport_error(struct fc_lport *, struct fc_frame *);
109
110 static void fc_lport_enter_reset(struct fc_lport *);
111 static void fc_lport_enter_flogi(struct fc_lport *);
112 static void fc_lport_enter_dns(struct fc_lport *);
113 static void fc_lport_enter_ns(struct fc_lport *, enum fc_lport_state);
114 static void fc_lport_enter_scr(struct fc_lport *);
115 static void fc_lport_enter_ready(struct fc_lport *);
116 static void fc_lport_enter_logo(struct fc_lport *);
117
118 static const char *fc_lport_state_names[] = {
119 [LPORT_ST_DISABLED] = "disabled",
120 [LPORT_ST_FLOGI] = "FLOGI",
121 [LPORT_ST_DNS] = "dNS",
122 [LPORT_ST_RNN_ID] = "RNN_ID",
123 [LPORT_ST_RSNN_NN] = "RSNN_NN",
124 [LPORT_ST_RSPN_ID] = "RSPN_ID",
125 [LPORT_ST_RFT_ID] = "RFT_ID",
126 [LPORT_ST_RFF_ID] = "RFF_ID",
127 [LPORT_ST_SCR] = "SCR",
128 [LPORT_ST_READY] = "Ready",
129 [LPORT_ST_LOGO] = "LOGO",
130 [LPORT_ST_RESET] = "reset",
131 };
132
133 /**
134 * struct fc_bsg_info - FC Passthrough management structure
135 * @job: The passthrough job
136 * @lport: The local port to pass through a command
137 * @rsp_code: The expected response code
138 * @sg: job->reply_payload.sg_list
139 * @nents: job->reply_payload.sg_cnt
140 * @offset: The offset into the response data
141 */
142 struct fc_bsg_info {
143 struct fc_bsg_job *job;
144 struct fc_lport *lport;
145 u16 rsp_code;
146 struct scatterlist *sg;
147 u32 nents;
148 size_t offset;
149 };
150
151 /**
152 * fc_frame_drop() - Dummy frame handler
153 * @lport: The local port the frame was received on
154 * @fp: The received frame
155 */
156 static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
157 {
158 fc_frame_free(fp);
159 return 0;
160 }
161
162 /**
163 * fc_lport_rport_callback() - Event handler for rport events
164 * @lport: The lport which is receiving the event
165 * @rdata: private remote port data
166 * @event: The event that occurred
167 *
168 * Locking Note: The rport lock should not be held when calling
169 * this function.
170 */
171 static void fc_lport_rport_callback(struct fc_lport *lport,
172 struct fc_rport_priv *rdata,
173 enum fc_rport_event event)
174 {
175 FC_LPORT_DBG(lport, "Received a %d event for port (%6.6x)\n", event,
176 rdata->ids.port_id);
177
178 mutex_lock(&lport->lp_mutex);
179 switch (event) {
180 case RPORT_EV_READY:
181 if (lport->state == LPORT_ST_DNS) {
182 lport->dns_rdata = rdata;
183 fc_lport_enter_ns(lport, LPORT_ST_RNN_ID);
184 } else {
185 FC_LPORT_DBG(lport, "Received a READY event "
186 "on port (%6.6x) for the directory "
187 "server, but the lport is not "
188 "in the DNS state, it's in the "
189 "%d state", rdata->ids.port_id,
190 lport->state);
191 lport->tt.rport_logoff(rdata);
192 }
193 break;
194 case RPORT_EV_LOGO:
195 case RPORT_EV_FAILED:
196 case RPORT_EV_STOP:
197 lport->dns_rdata = NULL;
198 break;
199 case RPORT_EV_NONE:
200 break;
201 }
202 mutex_unlock(&lport->lp_mutex);
203 }
204
205 /**
206 * fc_lport_state() - Return a string which represents the lport's state
207 * @lport: The lport whose state is to be converted to a string
208 */
209 static const char *fc_lport_state(struct fc_lport *lport)
210 {
211 const char *cp;
212
213 cp = fc_lport_state_names[lport->state];
214 if (!cp)
215 cp = "unknown";
216 return cp;
217 }
218
219 /**
220 * fc_lport_ptp_setup() - Create an rport for point-to-point mode
221 * @lport: The lport to attach the ptp rport to
222 * @remote_fid: The FID of the ptp rport
223 * @remote_wwpn: The WWPN of the ptp rport
224 * @remote_wwnn: The WWNN of the ptp rport
225 */
226 static void fc_lport_ptp_setup(struct fc_lport *lport,
227 u32 remote_fid, u64 remote_wwpn,
228 u64 remote_wwnn)
229 {
230 mutex_lock(&lport->disc.disc_mutex);
231 if (lport->ptp_rdata) {
232 lport->tt.rport_logoff(lport->ptp_rdata);
233 kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
234 }
235 lport->ptp_rdata = lport->tt.rport_create(lport, remote_fid);
236 kref_get(&lport->ptp_rdata->kref);
237 lport->ptp_rdata->ids.port_name = remote_wwpn;
238 lport->ptp_rdata->ids.node_name = remote_wwnn;
239 mutex_unlock(&lport->disc.disc_mutex);
240
241 lport->tt.rport_login(lport->ptp_rdata);
242
243 fc_lport_enter_ready(lport);
244 }
245
246 /**
247 * fc_get_host_port_state() - Return the port state of the given Scsi_Host
248 * @shost: The SCSI host whose port state is to be determined
249 */
250 void fc_get_host_port_state(struct Scsi_Host *shost)
251 {
252 struct fc_lport *lport = shost_priv(shost);
253
254 mutex_lock(&lport->lp_mutex);
255 if (!lport->link_up)
256 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
257 else
258 switch (lport->state) {
259 case LPORT_ST_READY:
260 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
261 break;
262 default:
263 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
264 }
265 mutex_unlock(&lport->lp_mutex);
266 }
267 EXPORT_SYMBOL(fc_get_host_port_state);
268
269 /**
270 * fc_get_host_speed() - Return the speed of the given Scsi_Host
271 * @shost: The SCSI host whose port speed is to be determined
272 */
273 void fc_get_host_speed(struct Scsi_Host *shost)
274 {
275 struct fc_lport *lport = shost_priv(shost);
276
277 fc_host_speed(shost) = lport->link_speed;
278 }
279 EXPORT_SYMBOL(fc_get_host_speed);
280
281 /**
282 * fc_get_host_stats() - Return the Scsi_Host's statistics
283 * @shost: The SCSI host whose statistics are to be returned
284 */
285 struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
286 {
287 struct fc_host_statistics *fcoe_stats;
288 struct fc_lport *lport = shost_priv(shost);
289 struct timespec v0, v1;
290 unsigned int cpu;
291
292 fcoe_stats = &lport->host_stats;
293 memset(fcoe_stats, 0, sizeof(struct fc_host_statistics));
294
295 jiffies_to_timespec(jiffies, &v0);
296 jiffies_to_timespec(lport->boot_time, &v1);
297 fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);
298
299 for_each_possible_cpu(cpu) {
300 struct fcoe_dev_stats *stats;
301
302 stats = per_cpu_ptr(lport->dev_stats, cpu);
303
304 fcoe_stats->tx_frames += stats->TxFrames;
305 fcoe_stats->tx_words += stats->TxWords;
306 fcoe_stats->rx_frames += stats->RxFrames;
307 fcoe_stats->rx_words += stats->RxWords;
308 fcoe_stats->error_frames += stats->ErrorFrames;
309 fcoe_stats->invalid_crc_count += stats->InvalidCRCCount;
310 fcoe_stats->fcp_input_requests += stats->InputRequests;
311 fcoe_stats->fcp_output_requests += stats->OutputRequests;
312 fcoe_stats->fcp_control_requests += stats->ControlRequests;
313 fcoe_stats->fcp_input_megabytes += stats->InputMegabytes;
314 fcoe_stats->fcp_output_megabytes += stats->OutputMegabytes;
315 fcoe_stats->link_failure_count += stats->LinkFailureCount;
316 }
317 fcoe_stats->lip_count = -1;
318 fcoe_stats->nos_count = -1;
319 fcoe_stats->loss_of_sync_count = -1;
320 fcoe_stats->loss_of_signal_count = -1;
321 fcoe_stats->prim_seq_protocol_err_count = -1;
322 fcoe_stats->dumped_frames = -1;
323 return fcoe_stats;
324 }
325 EXPORT_SYMBOL(fc_get_host_stats);
326
327 /**
328 * fc_lport_flogi_fill() - Fill in FLOGI command for request
329 * @lport: The local port the FLOGI is for
330 * @flogi: The FLOGI command
331 * @op: The opcode
332 */
333 static void fc_lport_flogi_fill(struct fc_lport *lport,
334 struct fc_els_flogi *flogi,
335 unsigned int op)
336 {
337 struct fc_els_csp *sp;
338 struct fc_els_cssp *cp;
339
340 memset(flogi, 0, sizeof(*flogi));
341 flogi->fl_cmd = (u8) op;
342 put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
343 put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
344 sp = &flogi->fl_csp;
345 sp->sp_hi_ver = 0x20;
346 sp->sp_lo_ver = 0x20;
347 sp->sp_bb_cred = htons(10); /* this gets set by gateway */
348 sp->sp_bb_data = htons((u16) lport->mfs);
349 cp = &flogi->fl_cssp[3 - 1]; /* class 3 parameters */
350 cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
351 if (op != ELS_FLOGI) {
352 sp->sp_features = htons(FC_SP_FT_CIRO);
353 sp->sp_tot_seq = htons(255); /* seq. we accept */
354 sp->sp_rel_off = htons(0x1f);
355 sp->sp_e_d_tov = htonl(lport->e_d_tov);
356
357 cp->cp_rdfs = htons((u16) lport->mfs);
358 cp->cp_con_seq = htons(255);
359 cp->cp_open_seq = 1;
360 }
361 }
362
363 /**
364 * fc_lport_add_fc4_type() - Add a supported FC-4 type to a local port
365 * @lport: The local port to add a new FC-4 type to
366 * @type: The new FC-4 type
367 */
368 static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
369 {
370 __be32 *mp;
371
372 mp = &lport->fcts.ff_type_map[type / FC_NS_BPW];
373 *mp = htonl(ntohl(*mp) | 1UL << (type % FC_NS_BPW));
374 }
375
376 /**
377 * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report.
378 * @sp: The sequence in the RLIR exchange
379 * @fp: The RLIR request frame
380 * @lport: Fibre Channel local port receiving the RLIR
381 *
382 * Locking Note: The lport lock is expected to be held before calling
383 * this function.
384 */
385 static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp,
386 struct fc_lport *lport)
387 {
388 FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
389 fc_lport_state(lport));
390
391 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
392 fc_frame_free(fp);
393 }
394
395 /**
396 * fc_lport_recv_echo_req() - Handle received ECHO request
397 * @sp: The sequence in the ECHO exchange
398 * @in_fp: The ECHO request frame
399 * @lport: The local port receiving the ECHO
400 *
401 * Locking Note: The lport lock is expected to be held before calling
402 * this function.
403 */
404 static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
405 struct fc_lport *lport)
406 {
407 struct fc_frame *fp;
408 struct fc_exch *ep = fc_seq_exch(sp);
409 unsigned int len;
410 void *pp;
411 void *dp;
412 u32 f_ctl;
413
414 FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
415 fc_lport_state(lport));
416
417 len = fr_len(in_fp) - sizeof(struct fc_frame_header);
418 pp = fc_frame_payload_get(in_fp, len);
419
420 if (len < sizeof(__be32))
421 len = sizeof(__be32);
422
423 fp = fc_frame_alloc(lport, len);
424 if (fp) {
425 dp = fc_frame_payload_get(fp, len);
426 memcpy(dp, pp, len);
427 *((__be32 *)dp) = htonl(ELS_LS_ACC << 24);
428 sp = lport->tt.seq_start_next(sp);
429 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
430 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
431 FC_TYPE_ELS, f_ctl, 0);
432 lport->tt.seq_send(lport, sp, fp);
433 }
434 fc_frame_free(in_fp);
435 }
436
437 /**
438 * fc_lport_recv_rnid_req() - Handle received Request Node ID data request
439 * @sp: The sequence in the RNID exchange
440 * @in_fp: The RNID request frame
441 * @lport: The local port receiving the RNID
442 *
443 * Locking Note: The lport lock is expected to be held before calling
444 * this function.
445 */
446 static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
447 struct fc_lport *lport)
448 {
449 struct fc_frame *fp;
450 struct fc_exch *ep = fc_seq_exch(sp);
451 struct fc_els_rnid *req;
452 struct {
453 struct fc_els_rnid_resp rnid;
454 struct fc_els_rnid_cid cid;
455 struct fc_els_rnid_gen gen;
456 } *rp;
457 struct fc_seq_els_data rjt_data;
458 u8 fmt;
459 size_t len;
460 u32 f_ctl;
461
462 FC_LPORT_DBG(lport, "Received RNID request while in state %s\n",
463 fc_lport_state(lport));
464
465 req = fc_frame_payload_get(in_fp, sizeof(*req));
466 if (!req) {
467 rjt_data.fp = NULL;
468 rjt_data.reason = ELS_RJT_LOGIC;
469 rjt_data.explan = ELS_EXPL_NONE;
470 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
471 } else {
472 fmt = req->rnid_fmt;
473 len = sizeof(*rp);
474 if (fmt != ELS_RNIDF_GEN ||
475 ntohl(lport->rnid_gen.rnid_atype) == 0) {
476 fmt = ELS_RNIDF_NONE; /* nothing to provide */
477 len -= sizeof(rp->gen);
478 }
479 fp = fc_frame_alloc(lport, len);
480 if (fp) {
481 rp = fc_frame_payload_get(fp, len);
482 memset(rp, 0, len);
483 rp->rnid.rnid_cmd = ELS_LS_ACC;
484 rp->rnid.rnid_fmt = fmt;
485 rp->rnid.rnid_cid_len = sizeof(rp->cid);
486 rp->cid.rnid_wwpn = htonll(lport->wwpn);
487 rp->cid.rnid_wwnn = htonll(lport->wwnn);
488 if (fmt == ELS_RNIDF_GEN) {
489 rp->rnid.rnid_sid_len = sizeof(rp->gen);
490 memcpy(&rp->gen, &lport->rnid_gen,
491 sizeof(rp->gen));
492 }
493 sp = lport->tt.seq_start_next(sp);
494 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
495 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
496 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
497 FC_TYPE_ELS, f_ctl, 0);
498 lport->tt.seq_send(lport, sp, fp);
499 }
500 }
501 fc_frame_free(in_fp);
502 }
503
504 /**
505 * fc_lport_recv_logo_req() - Handle received fabric LOGO request
506 * @sp: The sequence in the LOGO exchange
507 * @fp: The LOGO request frame
508 * @lport: The local port receiving the LOGO
509 *
510 * Locking Note: The lport lock is expected to be held before calling
511 * this function.
512 */
513 static void fc_lport_recv_logo_req(struct fc_seq *sp, struct fc_frame *fp,
514 struct fc_lport *lport)
515 {
516 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
517 fc_lport_enter_reset(lport);
518 fc_frame_free(fp);
519 }
520
521 /**
522 * fc_fabric_login() - Start the lport state machine
523 * @lport: The local port that should log into the fabric
524 *
525 * Locking Note: This function should not be called
526 * with the lport lock held.
527 */
528 int fc_fabric_login(struct fc_lport *lport)
529 {
530 int rc = -1;
531
532 mutex_lock(&lport->lp_mutex);
533 if (lport->state == LPORT_ST_DISABLED ||
534 lport->state == LPORT_ST_LOGO) {
535 fc_lport_state_enter(lport, LPORT_ST_RESET);
536 fc_lport_enter_reset(lport);
537 rc = 0;
538 }
539 mutex_unlock(&lport->lp_mutex);
540
541 return rc;
542 }
543 EXPORT_SYMBOL(fc_fabric_login);
544
545 /**
546 * __fc_linkup() - Handler for transport linkup events
547 * @lport: The lport whose link is up
548 *
549 * Locking: must be called with the lp_mutex held
550 */
551 void __fc_linkup(struct fc_lport *lport)
552 {
553 if (!lport->link_up) {
554 lport->link_up = 1;
555
556 if (lport->state == LPORT_ST_RESET)
557 fc_lport_enter_flogi(lport);
558 }
559 }
560
561 /**
562 * fc_linkup() - Handler for transport linkup events
563 * @lport: The local port whose link is up
564 */
565 void fc_linkup(struct fc_lport *lport)
566 {
567 printk(KERN_INFO "host%d: libfc: Link up on port (%6.6x)\n",
568 lport->host->host_no, lport->port_id);
569
570 mutex_lock(&lport->lp_mutex);
571 __fc_linkup(lport);
572 mutex_unlock(&lport->lp_mutex);
573 }
574 EXPORT_SYMBOL(fc_linkup);
575
576 /**
577 * __fc_linkdown() - Handler for transport linkdown events
578 * @lport: The lport whose link is down
579 *
580 * Locking: must be called with the lp_mutex held
581 */
582 void __fc_linkdown(struct fc_lport *lport)
583 {
584 if (lport->link_up) {
585 lport->link_up = 0;
586 fc_lport_enter_reset(lport);
587 lport->tt.fcp_cleanup(lport);
588 }
589 }
590
591 /**
592 * fc_linkdown() - Handler for transport linkdown events
593 * @lport: The local port whose link is down
594 */
595 void fc_linkdown(struct fc_lport *lport)
596 {
597 printk(KERN_INFO "host%d: libfc: Link down on port (%6.6x)\n",
598 lport->host->host_no, lport->port_id);
599
600 mutex_lock(&lport->lp_mutex);
601 __fc_linkdown(lport);
602 mutex_unlock(&lport->lp_mutex);
603 }
604 EXPORT_SYMBOL(fc_linkdown);
605
606 /**
607 * fc_fabric_logoff() - Logout of the fabric
608 * @lport: The local port to logoff the fabric
609 *
610 * Return value:
611 * 0 for success, -1 for failure
612 */
613 int fc_fabric_logoff(struct fc_lport *lport)
614 {
615 lport->tt.disc_stop_final(lport);
616 mutex_lock(&lport->lp_mutex);
617 if (lport->dns_rdata)
618 lport->tt.rport_logoff(lport->dns_rdata);
619 mutex_unlock(&lport->lp_mutex);
620 lport->tt.rport_flush_queue();
621 mutex_lock(&lport->lp_mutex);
622 fc_lport_enter_logo(lport);
623 mutex_unlock(&lport->lp_mutex);
624 cancel_delayed_work_sync(&lport->retry_work);
625 return 0;
626 }
627 EXPORT_SYMBOL(fc_fabric_logoff);
628
629 /**
630 * fc_lport_destroy() - Unregister a fc_lport
631 * @lport: The local port to unregister
632 *
633 * Note:
634 * exit routine for fc_lport instance
635 * clean-up all the allocated memory
636 * and free up other system resources.
637 *
638 */
639 int fc_lport_destroy(struct fc_lport *lport)
640 {
641 mutex_lock(&lport->lp_mutex);
642 lport->state = LPORT_ST_DISABLED;
643 lport->link_up = 0;
644 lport->tt.frame_send = fc_frame_drop;
645 mutex_unlock(&lport->lp_mutex);
646
647 lport->tt.fcp_abort_io(lport);
648 lport->tt.disc_stop_final(lport);
649 lport->tt.exch_mgr_reset(lport, 0, 0);
650 return 0;
651 }
652 EXPORT_SYMBOL(fc_lport_destroy);
653
654 /**
655 * fc_set_mfs() - Set the maximum frame size for a local port
656 * @lport: The local port to set the MFS for
657 * @mfs: The new MFS
658 */
659 int fc_set_mfs(struct fc_lport *lport, u32 mfs)
660 {
661 unsigned int old_mfs;
662 int rc = -EINVAL;
663
664 mutex_lock(&lport->lp_mutex);
665
666 old_mfs = lport->mfs;
667
668 if (mfs >= FC_MIN_MAX_FRAME) {
669 mfs &= ~3;
670 if (mfs > FC_MAX_FRAME)
671 mfs = FC_MAX_FRAME;
672 mfs -= sizeof(struct fc_frame_header);
673 lport->mfs = mfs;
674 rc = 0;
675 }
676
677 if (!rc && mfs < old_mfs)
678 fc_lport_enter_reset(lport);
679
680 mutex_unlock(&lport->lp_mutex);
681
682 return rc;
683 }
684 EXPORT_SYMBOL(fc_set_mfs);
685
686 /**
687 * fc_lport_disc_callback() - Callback for discovery events
688 * @lport: The local port receiving the event
689 * @event: The discovery event
690 */
691 void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
692 {
693 switch (event) {
694 case DISC_EV_SUCCESS:
695 FC_LPORT_DBG(lport, "Discovery succeeded\n");
696 break;
697 case DISC_EV_FAILED:
698 printk(KERN_ERR "host%d: libfc: "
699 "Discovery failed for port (%6.6x)\n",
700 lport->host->host_no, lport->port_id);
701 mutex_lock(&lport->lp_mutex);
702 fc_lport_enter_reset(lport);
703 mutex_unlock(&lport->lp_mutex);
704 break;
705 case DISC_EV_NONE:
706 WARN_ON(1);
707 break;
708 }
709 }
710
711 /**
712 * fc_lport_enter_ready() - Enter the ready state and start discovery
713 * @lport: The local port that is ready
714 *
715 * Locking Note: The lport lock is expected to be held before calling
716 * this routine.
717 */
718 static void fc_lport_enter_ready(struct fc_lport *lport)
719 {
720 FC_LPORT_DBG(lport, "Entered READY from state %s\n",
721 fc_lport_state(lport));
722
723 fc_lport_state_enter(lport, LPORT_ST_READY);
724 if (lport->vport)
725 fc_vport_set_state(lport->vport, FC_VPORT_ACTIVE);
726 fc_vports_linkchange(lport);
727
728 if (!lport->ptp_rdata)
729 lport->tt.disc_start(fc_lport_disc_callback, lport);
730 }
731
732 /**
733 * fc_lport_set_port_id() - set the local port Port ID
734 * @lport: The local port which will have its Port ID set.
735 * @port_id: The new port ID.
736 * @fp: The frame containing the incoming request, or NULL.
737 *
738 * Locking Note: The lport lock is expected to be held before calling
739 * this function.
740 */
741 static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id,
742 struct fc_frame *fp)
743 {
744 if (port_id)
745 printk(KERN_INFO "host%d: Assigned Port ID %6.6x\n",
746 lport->host->host_no, port_id);
747
748 lport->port_id = port_id;
749
750 /* Update the fc_host */
751 fc_host_port_id(lport->host) = port_id;
752
753 if (lport->tt.lport_set_port_id)
754 lport->tt.lport_set_port_id(lport, port_id, fp);
755 }
756
757 /**
758 * fc_lport_recv_flogi_req() - Receive a FLOGI request
759 * @sp_in: The sequence the FLOGI is on
760 * @rx_fp: The FLOGI frame
761 * @lport: The local port that received the request
762 *
763 * A received FLOGI request indicates a point-to-point connection.
764 * Accept it with the common service parameters indicating our N port.
765 * Set up to do a PLOGI if we have the higher-number WWPN.
766 *
767 * Locking Note: The lport lock is expected to be held before calling
768 * this function.
769 */
770 static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
771 struct fc_frame *rx_fp,
772 struct fc_lport *lport)
773 {
774 struct fc_frame *fp;
775 struct fc_frame_header *fh;
776 struct fc_seq *sp;
777 struct fc_exch *ep;
778 struct fc_els_flogi *flp;
779 struct fc_els_flogi *new_flp;
780 u64 remote_wwpn;
781 u32 remote_fid;
782 u32 local_fid;
783 u32 f_ctl;
784
785 FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n",
786 fc_lport_state(lport));
787
788 fh = fc_frame_header_get(rx_fp);
789 remote_fid = ntoh24(fh->fh_s_id);
790 flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
791 if (!flp)
792 goto out;
793 remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
794 if (remote_wwpn == lport->wwpn) {
795 printk(KERN_WARNING "host%d: libfc: Received FLOGI from port "
796 "with same WWPN %16.16llx\n",
797 lport->host->host_no, remote_wwpn);
798 goto out;
799 }
800 FC_LPORT_DBG(lport, "FLOGI from port WWPN %16.16llx\n", remote_wwpn);
801
802 /*
803 * XXX what is the right thing to do for FIDs?
804 * The originator might expect our S_ID to be 0xfffffe.
805 * But if so, both of us could end up with the same FID.
806 */
807 local_fid = FC_LOCAL_PTP_FID_LO;
808 if (remote_wwpn < lport->wwpn) {
809 local_fid = FC_LOCAL_PTP_FID_HI;
810 if (!remote_fid || remote_fid == local_fid)
811 remote_fid = FC_LOCAL_PTP_FID_LO;
812 } else if (!remote_fid) {
813 remote_fid = FC_LOCAL_PTP_FID_HI;
814 }
815
816 fc_lport_set_port_id(lport, local_fid, rx_fp);
817
818 fp = fc_frame_alloc(lport, sizeof(*flp));
819 if (fp) {
820 sp = lport->tt.seq_start_next(fr_seq(rx_fp));
821 new_flp = fc_frame_payload_get(fp, sizeof(*flp));
822 fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI);
823 new_flp->fl_cmd = (u8) ELS_LS_ACC;
824
825 /*
826 * Send the response. If this fails, the originator should
827 * repeat the sequence.
828 */
829 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
830 ep = fc_seq_exch(sp);
831 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, remote_fid, local_fid,
832 FC_TYPE_ELS, f_ctl, 0);
833 lport->tt.seq_send(lport, sp, fp);
834
835 } else {
836 fc_lport_error(lport, fp);
837 }
838 fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
839 get_unaligned_be64(&flp->fl_wwnn));
840
841 out:
842 sp = fr_seq(rx_fp);
843 fc_frame_free(rx_fp);
844 }
845
846 /**
847 * fc_lport_recv_req() - The generic lport request handler
848 * @lport: The local port that received the request
849 * @sp: The sequence the request is on
850 * @fp: The request frame
851 *
852 * This function will see if the lport handles the request or
853 * if an rport should handle the request.
854 *
855 * Locking Note: This function should not be called with the lport
856 * lock held because it will grab the lock.
857 */
858 static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
859 struct fc_frame *fp)
860 {
861 struct fc_frame_header *fh = fc_frame_header_get(fp);
862 void (*recv) (struct fc_seq *, struct fc_frame *, struct fc_lport *);
863
864 mutex_lock(&lport->lp_mutex);
865
866 /*
867 * Handle special ELS cases like FLOGI, LOGO, and
868 * RSCN here. These don't require a session.
869 * Even if we had a session, it might not be ready.
870 */
871 if (!lport->link_up)
872 fc_frame_free(fp);
873 else if (fh->fh_type == FC_TYPE_ELS &&
874 fh->fh_r_ctl == FC_RCTL_ELS_REQ) {
875 /*
876 * Check opcode.
877 */
878 recv = lport->tt.rport_recv_req;
879 switch (fc_frame_payload_op(fp)) {
880 case ELS_FLOGI:
881 recv = fc_lport_recv_flogi_req;
882 break;
883 case ELS_LOGO:
884 fh = fc_frame_header_get(fp);
885 if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI)
886 recv = fc_lport_recv_logo_req;
887 break;
888 case ELS_RSCN:
889 recv = lport->tt.disc_recv_req;
890 break;
891 case ELS_ECHO:
892 recv = fc_lport_recv_echo_req;
893 break;
894 case ELS_RLIR:
895 recv = fc_lport_recv_rlir_req;
896 break;
897 case ELS_RNID:
898 recv = fc_lport_recv_rnid_req;
899 break;
900 }
901
902 recv(sp, fp, lport);
903 } else {
904 FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n",
905 fr_eof(fp));
906 fc_frame_free(fp);
907 }
908 mutex_unlock(&lport->lp_mutex);
909
910 /*
911 * The common exch_done for all requests may not be good
912 * if any request requires a longer hold on the exchange. XXX
913 */
914 lport->tt.exch_done(sp);
915 }
916
917 /**
918 * fc_lport_reset() - Reset a local port
919 * @lport: The local port which should be reset
920 *
921 * Locking Note: This function should not be called with the
922 * lport lock held.
923 */
924 int fc_lport_reset(struct fc_lport *lport)
925 {
926 cancel_delayed_work_sync(&lport->retry_work);
927 mutex_lock(&lport->lp_mutex);
928 fc_lport_enter_reset(lport);
929 mutex_unlock(&lport->lp_mutex);
930 return 0;
931 }
932 EXPORT_SYMBOL(fc_lport_reset);
933
934 /**
935 * fc_lport_reset_locked() - Reset the local port w/ the lport lock held
936 * @lport: The local port to be reset
937 *
938 * Locking Note: The lport lock is expected to be held before calling
939 * this routine.
940 */
941 static void fc_lport_reset_locked(struct fc_lport *lport)
942 {
943 if (lport->dns_rdata)
944 lport->tt.rport_logoff(lport->dns_rdata);
945
946 if (lport->ptp_rdata) {
947 lport->tt.rport_logoff(lport->ptp_rdata);
948 kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
949 lport->ptp_rdata = NULL;
950 }
951
952 lport->tt.disc_stop(lport);
953
954 lport->tt.exch_mgr_reset(lport, 0, 0);
955 fc_host_fabric_name(lport->host) = 0;
956
957 if (lport->port_id)
958 fc_lport_set_port_id(lport, 0, NULL);
959 }
960
961 /**
962 * fc_lport_enter_reset() - Reset the local port
963 * @lport: The local port to be reset
964 *
965 * Locking Note: The lport lock is expected to be held before calling
966 * this routine.
967 */
968 static void fc_lport_enter_reset(struct fc_lport *lport)
969 {
970 FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
971 fc_lport_state(lport));
972
973 if (lport->state == LPORT_ST_DISABLED || lport->state == LPORT_ST_LOGO)
974 return;
975
976 if (lport->vport) {
977 if (lport->link_up)
978 fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING);
979 else
980 fc_vport_set_state(lport->vport, FC_VPORT_LINKDOWN);
981 }
982 fc_lport_state_enter(lport, LPORT_ST_RESET);
983 fc_vports_linkchange(lport);
984 fc_lport_reset_locked(lport);
985 if (lport->link_up)
986 fc_lport_enter_flogi(lport);
987 }
988
989 /**
990 * fc_lport_enter_disabled() - Disable the local port
991 * @lport: The local port to be disabled
992 *
993 * Locking Note: The lport lock is expected to be held before calling
994 * this routine.
995 */
996 static void fc_lport_enter_disabled(struct fc_lport *lport)
997 {
998 FC_LPORT_DBG(lport, "Entered disabled state from %s state\n",
999 fc_lport_state(lport));
1000
1001 fc_lport_state_enter(lport, LPORT_ST_DISABLED);
1002 fc_vports_linkchange(lport);
1003 fc_lport_reset_locked(lport);
1004 }
1005
1006 /**
1007 * fc_lport_error() - Handler for any errors
1008 * @lport: The local port that the error was on
1009 * @fp: The error code encoded in a frame pointer
1010 *
1011 * If the error was caused by a resource allocation failure
1012 * then wait for half a second and retry, otherwise retry
1013 * after the e_d_tov time.
1014 */
1015 static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
1016 {
1017 unsigned long delay = 0;
1018 FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n",
1019 PTR_ERR(fp), fc_lport_state(lport),
1020 lport->retry_count);
1021
1022 if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
1023 /*
1024 * Memory allocation failure, or the exchange timed out.
1025 * Retry after delay
1026 */
1027 if (lport->retry_count < lport->max_retry_count) {
1028 lport->retry_count++;
1029 if (!fp)
1030 delay = msecs_to_jiffies(500);
1031 else
1032 delay = msecs_to_jiffies(lport->e_d_tov);
1033
1034 schedule_delayed_work(&lport->retry_work, delay);
1035 } else {
1036 switch (lport->state) {
1037 case LPORT_ST_DISABLED:
1038 case LPORT_ST_READY:
1039 case LPORT_ST_RESET:
1040 case LPORT_ST_RNN_ID:
1041 case LPORT_ST_RSNN_NN:
1042 case LPORT_ST_RSPN_ID:
1043 case LPORT_ST_RFT_ID:
1044 case LPORT_ST_RFF_ID:
1045 case LPORT_ST_SCR:
1046 case LPORT_ST_DNS:
1047 case LPORT_ST_FLOGI:
1048 case LPORT_ST_LOGO:
1049 fc_lport_enter_reset(lport);
1050 break;
1051 }
1052 }
1053 }
1054 }
1055
1056 /**
1057 * fc_lport_ns_resp() - Handle response to a name server
1058 * registration exchange
1059 * @sp: current sequence in exchange
1060 * @fp: response frame
1061 * @lp_arg: Fibre Channel host port instance
1062 *
1063 * Locking Note: This function will be called without the lport lock
1064 * held, but it will lock, call an _enter_* function or fc_lport_error()
1065 * and then unlock the lport.
1066 */
1067 static void fc_lport_ns_resp(struct fc_seq *sp, struct fc_frame *fp,
1068 void *lp_arg)
1069 {
1070 struct fc_lport *lport = lp_arg;
1071 struct fc_frame_header *fh;
1072 struct fc_ct_hdr *ct;
1073
1074 FC_LPORT_DBG(lport, "Received a ns %s\n", fc_els_resp_type(fp));
1075
1076 if (fp == ERR_PTR(-FC_EX_CLOSED))
1077 return;
1078
1079 mutex_lock(&lport->lp_mutex);
1080
1081 if (lport->state < LPORT_ST_RNN_ID || lport->state > LPORT_ST_RFF_ID) {
1082 FC_LPORT_DBG(lport, "Received a name server response, "
1083 "but in state %s\n", fc_lport_state(lport));
1084 if (IS_ERR(fp))
1085 goto err;
1086 goto out;
1087 }
1088
1089 if (IS_ERR(fp)) {
1090 fc_lport_error(lport, fp);
1091 goto err;
1092 }
1093
1094 fh = fc_frame_header_get(fp);
1095 ct = fc_frame_payload_get(fp, sizeof(*ct));
1096
1097 if (fh && ct && fh->fh_type == FC_TYPE_CT &&
1098 ct->ct_fs_type == FC_FST_DIR &&
1099 ct->ct_fs_subtype == FC_NS_SUBTYPE &&
1100 ntohs(ct->ct_cmd) == FC_FS_ACC)
1101 switch (lport->state) {
1102 case LPORT_ST_RNN_ID:
1103 fc_lport_enter_ns(lport, LPORT_ST_RSNN_NN);
1104 break;
1105 case LPORT_ST_RSNN_NN:
1106 fc_lport_enter_ns(lport, LPORT_ST_RSPN_ID);
1107 break;
1108 case LPORT_ST_RSPN_ID:
1109 fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
1110 break;
1111 case LPORT_ST_RFT_ID:
1112 fc_lport_enter_ns(lport, LPORT_ST_RFF_ID);
1113 break;
1114 case LPORT_ST_RFF_ID:
1115 fc_lport_enter_scr(lport);
1116 break;
1117 default:
1118 /* should have already been caught by state checks */
1119 break;
1120 }
1121 else
1122 fc_lport_error(lport, fp);
1123 out:
1124 fc_frame_free(fp);
1125 err:
1126 mutex_unlock(&lport->lp_mutex);
1127 }
1128
1129 /**
1130 * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request
1131 * @sp: current sequence in SCR exchange
1132 * @fp: response frame
1133 * @lp_arg: Fibre Channel local port instance that sent the registration request
1134 *
1135 * Locking Note: This function will be called without the lport lock
1136 * held, but it will lock, call an _enter_* function or fc_lport_error
1137 * and then unlock the lport.
1138 */
1139 static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
1140 void *lp_arg)
1141 {
1142 struct fc_lport *lport = lp_arg;
1143 u8 op;
1144
1145 FC_LPORT_DBG(lport, "Received a SCR %s\n", fc_els_resp_type(fp));
1146
1147 if (fp == ERR_PTR(-FC_EX_CLOSED))
1148 return;
1149
1150 mutex_lock(&lport->lp_mutex);
1151
1152 if (lport->state != LPORT_ST_SCR) {
1153 FC_LPORT_DBG(lport, "Received a SCR response, but in state "
1154 "%s\n", fc_lport_state(lport));
1155 if (IS_ERR(fp))
1156 goto err;
1157 goto out;
1158 }
1159
1160 if (IS_ERR(fp)) {
1161 fc_lport_error(lport, fp);
1162 goto err;
1163 }
1164
1165 op = fc_frame_payload_op(fp);
1166 if (op == ELS_LS_ACC)
1167 fc_lport_enter_ready(lport);
1168 else
1169 fc_lport_error(lport, fp);
1170
1171 out:
1172 fc_frame_free(fp);
1173 err:
1174 mutex_unlock(&lport->lp_mutex);
1175 }
1176
1177 /**
1178 * fc_lport_enter_scr() - Send a SCR (State Change Register) request
1179 * @lport: The local port to register for state changes
1180 *
1181 * Locking Note: The lport lock is expected to be held before calling
1182 * this routine.
1183 */
1184 static void fc_lport_enter_scr(struct fc_lport *lport)
1185 {
1186 struct fc_frame *fp;
1187
1188 FC_LPORT_DBG(lport, "Entered SCR state from %s state\n",
1189 fc_lport_state(lport));
1190
1191 fc_lport_state_enter(lport, LPORT_ST_SCR);
1192
1193 fp = fc_frame_alloc(lport, sizeof(struct fc_els_scr));
1194 if (!fp) {
1195 fc_lport_error(lport, fp);
1196 return;
1197 }
1198
1199 if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR,
1200 fc_lport_scr_resp, lport,
1201 2 * lport->r_a_tov))
1202 fc_lport_error(lport, NULL);
1203 }
1204
1205 /**
1206 * fc_lport_enter_ns() - Register an object with the name server
1207 * @lport: Fibre Channel local port to register
 * @state: The name server registration state to enter (RNN_ID through RFF_ID)
1208 *
1209 * Locking Note: The lport lock is expected to be held before calling
1210 * this routine.
1211 */
1212 static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state)
1213 {
1214 struct fc_frame *fp;
1215 enum fc_ns_req cmd;
1216 int size = sizeof(struct fc_ct_hdr);
1217 size_t len;
1218
1219 FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
1220 fc_lport_state_names[state],
1221 fc_lport_state(lport));
1222
1223 fc_lport_state_enter(lport, state);
1224
1225 switch (state) {
1226 case LPORT_ST_RNN_ID:
1227 cmd = FC_NS_RNN_ID;
1228 size += sizeof(struct fc_ns_rn_id);
1229 break;
1230 case LPORT_ST_RSNN_NN:
1231 len = strnlen(fc_host_symbolic_name(lport->host), 255);
1232 /* if there is no symbolic name, skip to RFT_ID */
1233 if (!len)
1234 return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
1235 cmd = FC_NS_RSNN_NN;
1236 size += sizeof(struct fc_ns_rsnn) + len;
1237 break;
1238 case LPORT_ST_RSPN_ID:
1239 len = strnlen(fc_host_symbolic_name(lport->host), 255);
1240 /* if there is no symbolic name, skip to RFT_ID */
1241 if (!len)
1242 return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
1243 cmd = FC_NS_RSPN_ID;
1244 size += sizeof(struct fc_ns_rspn) + len;
1245 break;
1246 case LPORT_ST_RFT_ID:
1247 cmd = FC_NS_RFT_ID;
1248 size += sizeof(struct fc_ns_rft);
1249 break;
1250 case LPORT_ST_RFF_ID:
1251 cmd = FC_NS_RFF_ID;
1252 size += sizeof(struct fc_ns_rff_id);
1253 break;
1254 default:
1255 fc_lport_error(lport, NULL);
1256 return;
1257 }
1258
1259 fp = fc_frame_alloc(lport, size);
1260 if (!fp) {
1261 fc_lport_error(lport, fp);
1262 return;
1263 }
1264
1265 if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, cmd,
1266 fc_lport_ns_resp,
1267 lport, 3 * lport->r_a_tov))
1268 fc_lport_error(lport, fp);
1269 }
1270
1271 static struct fc_rport_operations fc_lport_rport_ops = {
1272 .event_callback = fc_lport_rport_callback,
1273 };
1274
1275 /**
1276 * fc_lport_enter_dns() - Create a fc_rport for the name server
1277 * @lport: The local port requesting a remote port for the name server
1278 *
1279 * Locking Note: The lport lock is expected to be held before calling
1280 * this routine.
1281 */
1282 static void fc_lport_enter_dns(struct fc_lport *lport)
1283 {
1284 struct fc_rport_priv *rdata;
1285
1286 FC_LPORT_DBG(lport, "Entered DNS state from %s state\n",
1287 fc_lport_state(lport));
1288
1289 fc_lport_state_enter(lport, LPORT_ST_DNS);
1290
1291 mutex_lock(&lport->disc.disc_mutex);
1292 rdata = lport->tt.rport_create(lport, FC_FID_DIR_SERV);
1293 mutex_unlock(&lport->disc.disc_mutex);
1294 if (!rdata)
1295 goto err;
1296
1297 rdata->ops = &fc_lport_rport_ops;
1298 lport->tt.rport_login(rdata);
1299 return;
1300
1301 err:
1302 fc_lport_error(lport, NULL);
1303 }
1304
1305 /**
1306 * fc_lport_timeout() - Handler for the retry_work timer
1307 * @work: The work struct of the local port
1308 */
1309 static void fc_lport_timeout(struct work_struct *work)
1310 {
1311 struct fc_lport *lport =
1312 container_of(work, struct fc_lport,
1313 retry_work.work);
1314
1315 mutex_lock(&lport->lp_mutex);
1316
1317 switch (lport->state) {
1318 case LPORT_ST_DISABLED:
1319 WARN_ON(1);
1320 break;
1321 case LPORT_ST_READY:
1322 WARN_ON(1);
1323 break;
1324 case LPORT_ST_RESET:
1325 break;
1326 case LPORT_ST_FLOGI:
1327 fc_lport_enter_flogi(lport);
1328 break;
1329 case LPORT_ST_DNS:
1330 fc_lport_enter_dns(lport);
1331 break;
1332 case LPORT_ST_RNN_ID:
1333 case LPORT_ST_RSNN_NN:
1334 case LPORT_ST_RSPN_ID:
1335 case LPORT_ST_RFT_ID:
1336 case LPORT_ST_RFF_ID:
1337 fc_lport_enter_ns(lport, lport->state);
1338 break;
1339 case LPORT_ST_SCR:
1340 fc_lport_enter_scr(lport);
1341 break;
1342 case LPORT_ST_LOGO:
1343 fc_lport_enter_logo(lport);
1344 break;
1345 }
1346
1347 mutex_unlock(&lport->lp_mutex);
1348 }
1349
1350 /**
1351 * fc_lport_logo_resp() - Handle response to LOGO request
1352 * @sp: The sequence that the LOGO was on
1353 * @fp: The LOGO response frame
1354 * @lp_arg: The local port that sent the LOGO request
1355 *
1356 * Locking Note: This function will be called without the lport lock
1357 * held, but it will lock, call an _enter_* function or fc_lport_error()
1358 * and then unlock the lport.
1359 */
1360 void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
1361 void *lp_arg)
1362 {
1363 struct fc_lport *lport = lp_arg;
1364 u8 op;
1365
1366 FC_LPORT_DBG(lport, "Received a LOGO %s\n", fc_els_resp_type(fp));
1367
1368 if (fp == ERR_PTR(-FC_EX_CLOSED))
1369 return;
1370
1371 mutex_lock(&lport->lp_mutex);
1372
1373 if (lport->state != LPORT_ST_LOGO) {
1374 FC_LPORT_DBG(lport, "Received a LOGO response, but in state "
1375 "%s\n", fc_lport_state(lport));
1376 if (IS_ERR(fp))
1377 goto err;
1378 goto out;
1379 }
1380
1381 if (IS_ERR(fp)) {
1382 fc_lport_error(lport, fp);
1383 goto err;
1384 }
1385
1386 op = fc_frame_payload_op(fp);
1387 if (op == ELS_LS_ACC)
1388 fc_lport_enter_disabled(lport);
1389 else
1390 fc_lport_error(lport, fp);
1391
1392 out:
1393 fc_frame_free(fp);
1394 err:
1395 mutex_unlock(&lport->lp_mutex);
1396 }
1397 EXPORT_SYMBOL(fc_lport_logo_resp);
1398
1399 /**
1400 * fc_lport_enter_logo() - Logout of the fabric
1401 * @lport: The local port to be logged out
1402 *
1403 * Locking Note: The lport lock is expected to be held before calling
1404 * this routine.
1405 */
1406 static void fc_lport_enter_logo(struct fc_lport *lport)
1407 {
1408 struct fc_frame *fp;
1409 struct fc_els_logo *logo;
1410
1411 FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n",
1412 fc_lport_state(lport));
1413
1414 fc_lport_state_enter(lport, LPORT_ST_LOGO);
1415 fc_vports_linkchange(lport);
1416
1417 fp = fc_frame_alloc(lport, sizeof(*logo));
1418 if (!fp) {
1419 fc_lport_error(lport, fp);
1420 return;
1421 }
1422
1423 if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO,
1424 fc_lport_logo_resp, lport,
1425 2 * lport->r_a_tov))
1426 fc_lport_error(lport, NULL);
1427 }
1428
1429 /**
1430 * fc_lport_flogi_resp() - Handle response to FLOGI request
1431 * @sp: The sequence that the FLOGI was on
1432 * @fp: The FLOGI response frame
1433 * @lp_arg: The local port that received the FLOGI response
1434 *
1435 * Locking Note: This function will be called without the lport lock
1436 * held, but it will lock, call an _enter_* function or fc_lport_error()
1437 * and then unlock the lport.
1438 */
1439 void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1440 void *lp_arg)
1441 {
1442 struct fc_lport *lport = lp_arg;
1443 struct fc_frame_header *fh;
1444 struct fc_els_flogi *flp;
1445 u32 did;
1446 u16 csp_flags;
1447 unsigned int r_a_tov;
1448 unsigned int e_d_tov;
1449 u16 mfs;
1450
1451 FC_LPORT_DBG(lport, "Received a FLOGI %s\n", fc_els_resp_type(fp));
1452
1453 if (fp == ERR_PTR(-FC_EX_CLOSED))
1454 return;
1455
1456 mutex_lock(&lport->lp_mutex);
1457
1458 if (lport->state != LPORT_ST_FLOGI) {
1459 FC_LPORT_DBG(lport, "Received a FLOGI response, but in state "
1460 "%s\n", fc_lport_state(lport));
1461 if (IS_ERR(fp))
1462 goto err;
1463 goto out;
1464 }
1465
1466 if (IS_ERR(fp)) {
1467 fc_lport_error(lport, fp);
1468 goto err;
1469 }
1470
1471 fh = fc_frame_header_get(fp);
1472 did = ntoh24(fh->fh_d_id);
1473 if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) {
1474 flp = fc_frame_payload_get(fp, sizeof(*flp));
1475 if (flp) {
1476 mfs = ntohs(flp->fl_csp.sp_bb_data) &
1477 FC_SP_BB_DATA_MASK;
1478 if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
1479 mfs < lport->mfs)
1480 lport->mfs = mfs;
1481 csp_flags = ntohs(flp->fl_csp.sp_features);
1482 r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
1483 e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
1484 if (csp_flags & FC_SP_FT_EDTR)
1485 e_d_tov /= 1000000;
1486
1487 lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC);
1488
1489 if ((csp_flags & FC_SP_FT_FPORT) == 0) {
1490 if (e_d_tov > lport->e_d_tov)
1491 lport->e_d_tov = e_d_tov;
1492 lport->r_a_tov = 2 * e_d_tov;
1493 fc_lport_set_port_id(lport, did, fp);
1494 printk(KERN_INFO "host%d: libfc: "
1495 "Port (%6.6x) entered "
1496 "point-to-point mode\n",
1497 lport->host->host_no, did);
1498 fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id),
1499 get_unaligned_be64(
1500 &flp->fl_wwpn),
1501 get_unaligned_be64(
1502 &flp->fl_wwnn));
1503 } else {
1504 lport->e_d_tov = e_d_tov;
1505 lport->r_a_tov = r_a_tov;
1506 fc_host_fabric_name(lport->host) =
1507 get_unaligned_be64(&flp->fl_wwnn);
1508 fc_lport_set_port_id(lport, did, fp);
1509 fc_lport_enter_dns(lport);
1510 }
1511 }
1512 } else {
1513 FC_LPORT_DBG(lport, "Bad FLOGI response\n");
1514 }
1515
1516 out:
1517 fc_frame_free(fp);
1518 err:
1519 mutex_unlock(&lport->lp_mutex);
1520 }
1521 EXPORT_SYMBOL(fc_lport_flogi_resp);
1522
1523 /**
1524 * fc_lport_enter_flogi() - Send a FLOGI request to the fabric manager
1525 * @lport: Fibre Channel local port to be logged in to the fabric
1526 *
1527 * Locking Note: The lport lock is expected to be held before calling
1528 * this routine.
1529 */
1530 void fc_lport_enter_flogi(struct fc_lport *lport)
1531 {
1532 struct fc_frame *fp;
1533
1534 FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n",
1535 fc_lport_state(lport));
1536
1537 fc_lport_state_enter(lport, LPORT_ST_FLOGI);
1538
1539 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
1540 if (!fp)
1541 return fc_lport_error(lport, fp);
1542
1543 if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp,
1544 lport->vport ? ELS_FDISC : ELS_FLOGI,
1545 fc_lport_flogi_resp, lport,
1546 lport->vport ? 2 * lport->r_a_tov :
1547 lport->e_d_tov))
1548 fc_lport_error(lport, NULL);
1549 }
1550
1551 /**
1552 * fc_lport_config() - Configure a fc_lport
1553 * @lport: The local port to be configured
1554 */
1555 int fc_lport_config(struct fc_lport *lport)
1556 {
1557 INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
1558 mutex_init(&lport->lp_mutex);
1559
1560 fc_lport_state_enter(lport, LPORT_ST_DISABLED);
1561
1562 fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
1563 fc_lport_add_fc4_type(lport, FC_TYPE_CT);
1564
1565 return 0;
1566 }
1567 EXPORT_SYMBOL(fc_lport_config);
1568
1569 /**
1570 * fc_lport_init() - Initialize the lport layer for a local port
1571 * @lport: The local port to initialize the exchange layer for
1572 */
1573 int fc_lport_init(struct fc_lport *lport)
1574 {
1575 if (!lport->tt.lport_recv)
1576 lport->tt.lport_recv = fc_lport_recv_req;
1577
1578 if (!lport->tt.lport_reset)
1579 lport->tt.lport_reset = fc_lport_reset;
1580
1581 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
1582 fc_host_node_name(lport->host) = lport->wwnn;
1583 fc_host_port_name(lport->host) = lport->wwpn;
1584 fc_host_supported_classes(lport->host) = FC_COS_CLASS3;
1585 memset(fc_host_supported_fc4s(lport->host), 0,
1586 sizeof(fc_host_supported_fc4s(lport->host)));
1587 fc_host_supported_fc4s(lport->host)[2] = 1;
1588 fc_host_supported_fc4s(lport->host)[7] = 1;
1589
1590 /* This value is also unchanging */
1591 memset(fc_host_active_fc4s(lport->host), 0,
1592 sizeof(fc_host_active_fc4s(lport->host)));
1593 fc_host_active_fc4s(lport->host)[2] = 1;
1594 fc_host_active_fc4s(lport->host)[7] = 1;
1595 fc_host_maxframe_size(lport->host) = lport->mfs;
1596 fc_host_supported_speeds(lport->host) = 0;
1597 if (lport->link_supported_speeds & FC_PORTSPEED_1GBIT)
1598 fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
1599 if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
1600 fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
1601
1602 return 0;
1603 }
1604 EXPORT_SYMBOL(fc_lport_init);
1605
1606 /**
1607 * fc_lport_bsg_resp() - The common response handler for FC Passthrough requests
1608 * @sp: The sequence for the FC Passthrough response
1609 * @fp: The response frame
1610 * @info_arg: The BSG info that the response is for
1611 */
1612 static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
1613 void *info_arg)
1614 {
1615 struct fc_bsg_info *info = info_arg;
1616 struct fc_bsg_job *job = info->job;
1617 struct fc_lport *lport = info->lport;
1618 struct fc_frame_header *fh;
1619 size_t len;
1620 void *buf;
1621
1622 if (IS_ERR(fp)) {
1623 job->reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ?
1624 -ECONNABORTED : -ETIMEDOUT;
1625 job->reply_len = sizeof(uint32_t);
1626 job->state_flags |= FC_RQST_STATE_DONE;
1627 job->job_done(job);
1628 kfree(info);
1629 return;
1630 }
1631
1632 mutex_lock(&lport->lp_mutex);
1633 fh = fc_frame_header_get(fp);
1634 len = fr_len(fp) - sizeof(*fh);
1635 buf = fc_frame_payload_get(fp, 0);
1636
1637 if (fr_sof(fp) == FC_SOF_I3 && !ntohs(fh->fh_seq_cnt)) {
1638 /* Get the response code from the first frame payload */
1639 unsigned short cmd = (info->rsp_code == FC_FS_ACC) ?
1640 ntohs(((struct fc_ct_hdr *)buf)->ct_cmd) :
1641 (unsigned short)fc_frame_payload_op(fp);
1642
1643 /* Save the reply status of the job */
1644 job->reply->reply_data.ctels_reply.status =
1645 (cmd == info->rsp_code) ?
1646 FC_CTELS_STATUS_OK : FC_CTELS_STATUS_REJECT;
1647 }
1648
1649 job->reply->reply_payload_rcv_len +=
1650 fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents,
1651 &info->offset, KM_BIO_SRC_IRQ, NULL);
1652
1653 if (fr_eof(fp) == FC_EOF_T &&
1654 (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
1655 (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
1656 if (job->reply->reply_payload_rcv_len >
1657 job->reply_payload.payload_len)
1658 job->reply->reply_payload_rcv_len =
1659 job->reply_payload.payload_len;
1660 job->reply->result = 0;
1661 job->state_flags |= FC_RQST_STATE_DONE;
1662 job->job_done(job);
1663 kfree(info);
1664 }
1665 fc_frame_free(fp);
1666 mutex_unlock(&lport->lp_mutex);
1667 }
1668
1669 /**
1670 * fc_lport_els_request() - Send ELS passthrough request
1671 * @job: The BSG Passthrough job
1672 * @lport: The local port sending the request
1673 * @did: The destination port id
 * @tov: The timeout period to wait for the response
1674 *
1675 * Locking Note: The lport lock is expected to be held before calling
1676 * this routine.
1677 */
1678 static int fc_lport_els_request(struct fc_bsg_job *job,
1679 struct fc_lport *lport,
1680 u32 did, u32 tov)
1681 {
1682 struct fc_bsg_info *info;
1683 struct fc_frame *fp;
1684 struct fc_frame_header *fh;
1685 char *pp;
1686 int len;
1687
1688 fp = fc_frame_alloc(lport, job->request_payload.payload_len);
1689 if (!fp)
1690 return -ENOMEM;
1691
1692 len = job->request_payload.payload_len;
1693 pp = fc_frame_payload_get(fp, len);
1694
1695 sg_copy_to_buffer(job->request_payload.sg_list,
1696 job->request_payload.sg_cnt,
1697 pp, len);
1698
1699 fh = fc_frame_header_get(fp);
1700 fh->fh_r_ctl = FC_RCTL_ELS_REQ;
1701 hton24(fh->fh_d_id, did);
1702 hton24(fh->fh_s_id, lport->port_id);
1703 fh->fh_type = FC_TYPE_ELS;
1704 hton24(fh->fh_f_ctl, FC_FC_FIRST_SEQ |
1705 FC_FC_END_SEQ | FC_FC_SEQ_INIT);
1706 fh->fh_cs_ctl = 0;
1707 fh->fh_df_ctl = 0;
1708 fh->fh_parm_offset = 0;
1709
1710 info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
1711 if (!info) {
1712 fc_frame_free(fp);
1713 return -ENOMEM;
1714 }
1715
1716 info->job = job;
1717 info->lport = lport;
1718 info->rsp_code = ELS_LS_ACC;
1719 info->nents = job->reply_payload.sg_cnt;
1720 info->sg = job->reply_payload.sg_list;
1721
1722 if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
1723 NULL, info, tov))
1724 return -ECOMM;
1725 return 0;
1726 }
1727
1728 /**
1729 * fc_lport_ct_request() - Send CT Passthrough request
1730 * @job: The BSG Passthrough job
1731 * @lport: The local port sending the request
1732 * @did: The destination FC-ID
1733 * @tov: The timeout period to wait for the response
1734 *
1735 * Locking Note: The lport lock is expected to be held before calling
1736 * this routine.
1737 */
1738 static int fc_lport_ct_request(struct fc_bsg_job *job,
1739 struct fc_lport *lport, u32 did, u32 tov)
1740 {
1741 struct fc_bsg_info *info;
1742 struct fc_frame *fp;
1743 struct fc_frame_header *fh;
1744 struct fc_ct_req *ct;
1745 size_t len;
1746
1747 fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
1748 job->request_payload.payload_len);
1749 if (!fp)
1750 return -ENOMEM;
1751
1752 len = job->request_payload.payload_len;
1753 ct = fc_frame_payload_get(fp, len);
1754
1755 sg_copy_to_buffer(job->request_payload.sg_list,
1756 job->request_payload.sg_cnt,
1757 ct, len);
1758
1759 fh = fc_frame_header_get(fp);
1760 fh->fh_r_ctl = FC_RCTL_DD_UNSOL_CTL;
1761 hton24(fh->fh_d_id, did);
1762 hton24(fh->fh_s_id, lport->port_id);
1763 fh->fh_type = FC_TYPE_CT;
1764 hton24(fh->fh_f_ctl, FC_FC_FIRST_SEQ |
1765 FC_FC_END_SEQ | FC_FC_SEQ_INIT);
1766 fh->fh_cs_ctl = 0;
1767 fh->fh_df_ctl = 0;
1768 fh->fh_parm_offset = 0;
1769
1770 info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
1771 if (!info) {
1772 fc_frame_free(fp);
1773 return -ENOMEM;
1774 }
1775
1776 info->job = job;
1777 info->lport = lport;
1778 info->rsp_code = FC_FS_ACC;
1779 info->nents = job->reply_payload.sg_cnt;
1780 info->sg = job->reply_payload.sg_list;
1781
1782 if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
1783 NULL, info, tov))
1784 return -ECOMM;
1785 return 0;
1786 }
1787
1788 /**
1789 * fc_lport_bsg_request() - The common entry point for sending
1790 * FC Passthrough requests
1791 * @job: The BSG passthrough job
1792 */
1793 int fc_lport_bsg_request(struct fc_bsg_job *job)
1794 {
1795 struct request *rsp = job->req->next_rq;
1796 struct Scsi_Host *shost = job->shost;
1797 struct fc_lport *lport = shost_priv(shost);
1798 struct fc_rport *rport;
1799 struct fc_rport_priv *rdata;
1800 int rc = -EINVAL;
1801 u32 did;
1802
1803 job->reply->reply_payload_rcv_len = 0;
1804 if (rsp)
1805 rsp->resid_len = job->reply_payload.payload_len;
1806
1807 mutex_lock(&lport->lp_mutex);
1808
1809 switch (job->request->msgcode) {
1810 case FC_BSG_RPT_ELS:
1811 rport = job->rport;
1812 if (!rport)
1813 break;
1814
1815 rdata = rport->dd_data;
1816 rc = fc_lport_els_request(job, lport, rport->port_id,
1817 rdata->e_d_tov);
1818 break;
1819
1820 case FC_BSG_RPT_CT:
1821 rport = job->rport;
1822 if (!rport)
1823 break;
1824
1825 rdata = rport->dd_data;
1826 rc = fc_lport_ct_request(job, lport, rport->port_id,
1827 rdata->e_d_tov);
1828 break;
1829
1830 case FC_BSG_HST_CT:
1831 did = ntoh24(job->request->rqst_data.h_ct.port_id);
1832 if (did == FC_FID_DIR_SERV)
1833 rdata = lport->dns_rdata;
1834 else
1835 rdata = lport->tt.rport_lookup(lport, did);
1836
1837 if (!rdata)
1838 break;
1839
1840 rc = fc_lport_ct_request(job, lport, did, rdata->e_d_tov);
1841 break;
1842
1843 case FC_BSG_HST_ELS_NOLOGIN:
1844 did = ntoh24(job->request->rqst_data.h_els.port_id);
1845 rc = fc_lport_els_request(job, lport, did, lport->e_d_tov);
1846 break;
1847 }
1848
1849 mutex_unlock(&lport->lp_mutex);
1850 return rc;
1851 }
1852 EXPORT_SYMBOL(fc_lport_bsg_request);