drivers/scsi/libfc/fc_lport.c
1 /*
2 * Copyright(c) 2007 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20 /*
21 * PORT LOCKING NOTES
22 *
23 * These comments only apply to the 'port code' which consists of the lport,
24 * disc and rport blocks.
25 *
26 * MOTIVATION
27 *
28 * The lport, disc and rport blocks all have mutexes that are used to protect
29 * those objects. The main motivation for these locks is to prevent an
30 * lport reset just before we send a frame. In that scenario the
31 * lport's FID would get set to zero and then we'd send a frame with an
32 * invalid SID. We also need to ensure that states don't change unexpectedly
33 * while processing another state.
34 *
35 * HIERARCHY
36 *
37 * The following hierarchy defines the locking rules. A greater lock
38 * may be held before acquiring a lesser lock, but a lesser lock should never
39 * be held while attempting to acquire a greater lock. Here is the hierarchy-
40 *
41 * lport > disc, lport > rport, disc > rport
42 *
43 * CALLBACKS
44 *
45 * The callbacks cause complications with this scheme. There is a callback
46 * from the rport (to either lport or disc) and a callback from disc
47 * (to the lport).
48 *
49 * As rports exit the rport state machine a callback is made to the owner of
50 * the rport to notify success or failure. Since the callback is likely to
51 * cause the lport or disc to grab its lock we cannot hold the rport lock
52 * while making the callback. To ensure that the rport is not free'd while
53 * processing the callback the rport callbacks are serialized through a
54 * single-threaded workqueue. An rport would never be free'd while in a
55 * callback handler because no other rport work in this queue can be executed
56 * at the same time.
57 *
58 * When discovery succeeds or fails a callback is made to the lport as
59 * notification. Currently, successful discovery causes the lport to take no
60 * action. A failure will cause the lport to reset. There is likely a circular
61 * locking problem with this implementation.
62 */
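/*
 * Illustrative sketch only (nothing in this file takes both locks in this
 * exact sequence): the hierarchy above allows taking the disc mutex while
 * lp_mutex is held, but never the reverse:
 *
 *	mutex_lock(&lport->lp_mutex);		   greater lock first
 *	mutex_lock(&lport->disc.disc_mutex);	   then the lesser lock
 *	...
 *	mutex_unlock(&lport->disc.disc_mutex);
 *	mutex_unlock(&lport->lp_mutex);
 *
 * Acquiring lp_mutex while disc_mutex is held would invert the hierarchy
 * and risk deadlock.
 */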
63
64 /*
65 * LPORT LOCKING
66 *
67 * The critical sections protected by the lport's mutex are quite broad and
68 * may be improved upon in the future. The lport code and its locking don't
69 * influence the I/O path, so excessive locking doesn't penalize I/O
70 * performance.
71 *
72 * The strategy is to lock whenever processing a request or response. Note
73 * that every _enter_* function corresponds to a state change. They generally
74 * change the lport's state and then send a request out on the wire. We lock
75 * before calling any of these functions to protect that state change. This
76 * means that the entry points into the lport block manage the locks while
77 * the state machine transitions between states (i.e. _enter_* functions),
78 * always staying protected.
79 *
80 * When handling responses we also hold the lport mutex broadly. When the
81 * lport receives the response frame it locks the mutex and then calls the
82 * appropriate handler for the particular response. Generally a response will
83 * trigger a state change and so the lock must already be held.
84 *
85 * Retries also have to consider the locking. The retries occur from a work
86 * context and the work function will lock the lport and then retry the state
87 * (i.e. _enter_* function).
88 */
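/*
 * Illustrative sketch only of the entry-point pattern described above:
 * lp_mutex is taken once at the entry point and the _enter_* helper runs
 * with it held, exactly as fc_lport_reset() below does for
 * fc_lport_enter_reset():
 *
 *	mutex_lock(&lport->lp_mutex);
 *	fc_lport_enter_reset(lport);	state change, then frame send
 *	mutex_unlock(&lport->lp_mutex);
 */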
89
90 #include <linux/timer.h>
91 #include <linux/delay.h>
92 #include <linux/slab.h>
93 #include <asm/unaligned.h>
94
95 #include <scsi/fc/fc_gs.h>
96
97 #include <scsi/libfc.h>
98 #include <scsi/fc_encode.h>
99 #include <linux/scatterlist.h>
100
101 #include "fc_libfc.h"
102
103 /* Fabric IDs to use for point-to-point mode, chosen on whims. */
104 #define FC_LOCAL_PTP_FID_LO 0x010101
105 #define FC_LOCAL_PTP_FID_HI 0x010102
106
107 #define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds)*/
108
109 static void fc_lport_error(struct fc_lport *, struct fc_frame *);
110
111 static void fc_lport_enter_reset(struct fc_lport *);
112 static void fc_lport_enter_flogi(struct fc_lport *);
113 static void fc_lport_enter_dns(struct fc_lport *);
114 static void fc_lport_enter_ns(struct fc_lport *, enum fc_lport_state);
115 static void fc_lport_enter_scr(struct fc_lport *);
116 static void fc_lport_enter_ready(struct fc_lport *);
117 static void fc_lport_enter_logo(struct fc_lport *);
118
119 static const char *fc_lport_state_names[] = {
120 [LPORT_ST_DISABLED] = "disabled",
121 [LPORT_ST_FLOGI] = "FLOGI",
122 [LPORT_ST_DNS] = "dNS",
123 [LPORT_ST_RNN_ID] = "RNN_ID",
124 [LPORT_ST_RSNN_NN] = "RSNN_NN",
125 [LPORT_ST_RSPN_ID] = "RSPN_ID",
126 [LPORT_ST_RFT_ID] = "RFT_ID",
127 [LPORT_ST_RFF_ID] = "RFF_ID",
128 [LPORT_ST_SCR] = "SCR",
129 [LPORT_ST_READY] = "Ready",
130 [LPORT_ST_LOGO] = "LOGO",
131 [LPORT_ST_RESET] = "reset",
132 };
133
134 /**
135 * struct fc_bsg_info - FC Passthrough management structure
136 * @job: The passthrough job
137 * @lport: The local port to pass through a command
138 * @rsp_code: The expected response code
139 * @sg: job->reply_payload.sg_list
140 * @nents: job->reply_payload.sg_cnt
141 * @offset: The offset into the response data
142 */
143 struct fc_bsg_info {
144 struct fc_bsg_job *job;
145 struct fc_lport *lport;
146 u16 rsp_code;
147 struct scatterlist *sg;
148 u32 nents;
149 size_t offset;
150 };
151
152 /**
153 * fc_frame_drop() - Dummy frame handler
154 * @lport: The local port the frame was received on
155 * @fp: The received frame
156 */
157 static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
158 {
159 fc_frame_free(fp);
160 return 0;
161 }
162
163 /**
164 * fc_lport_rport_callback() - Event handler for rport events
165 * @lport: The lport which is receiving the event
166 * @rdata: private remote port data
167 * @event: The event that occurred
168 *
169 * Locking Note: The rport lock should not be held when calling
170 * this function.
171 */
172 static void fc_lport_rport_callback(struct fc_lport *lport,
173 struct fc_rport_priv *rdata,
174 enum fc_rport_event event)
175 {
176 FC_LPORT_DBG(lport, "Received a %d event for port (%6.6x)\n", event,
177 rdata->ids.port_id);
178
179 mutex_lock(&lport->lp_mutex);
180 switch (event) {
181 case RPORT_EV_READY:
182 if (lport->state == LPORT_ST_DNS) {
183 lport->dns_rdata = rdata;
184 fc_lport_enter_ns(lport, LPORT_ST_RNN_ID);
185 } else {
186 FC_LPORT_DBG(lport, "Received a READY event "
187 "on port (%6.6x) for the directory "
188 "server, but the lport is not "
189 "in the DNS state, it's in the "
190 "%d state", rdata->ids.port_id,
191 lport->state);
192 lport->tt.rport_logoff(rdata);
193 }
194 break;
195 case RPORT_EV_LOGO:
196 case RPORT_EV_FAILED:
197 case RPORT_EV_STOP:
198 lport->dns_rdata = NULL;
199 break;
200 case RPORT_EV_NONE:
201 break;
202 }
203 mutex_unlock(&lport->lp_mutex);
204 }
205
206 /**
207 * fc_lport_state() - Return a string which represents the lport's state
208 * @lport: The lport whose state is to be converted to a string
209 */
210 static const char *fc_lport_state(struct fc_lport *lport)
211 {
212 const char *cp;
213
214 cp = fc_lport_state_names[lport->state];
215 if (!cp)
216 cp = "unknown";
217 return cp;
218 }
219
220 /**
221 * fc_lport_ptp_setup() - Create an rport for point-to-point mode
222 * @lport: The lport to attach the ptp rport to
223 * @remote_fid: The FID of the ptp rport
224 * @remote_wwpn: The WWPN of the ptp rport
225 * @remote_wwnn: The WWNN of the ptp rport
226 */
227 static void fc_lport_ptp_setup(struct fc_lport *lport,
228 u32 remote_fid, u64 remote_wwpn,
229 u64 remote_wwnn)
230 {
231 mutex_lock(&lport->disc.disc_mutex);
232 if (lport->ptp_rdata) {
233 lport->tt.rport_logoff(lport->ptp_rdata);
234 kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
235 }
236 lport->ptp_rdata = lport->tt.rport_create(lport, remote_fid);
237 kref_get(&lport->ptp_rdata->kref);
238 lport->ptp_rdata->ids.port_name = remote_wwpn;
239 lport->ptp_rdata->ids.node_name = remote_wwnn;
240 mutex_unlock(&lport->disc.disc_mutex);
241
242 lport->tt.rport_login(lport->ptp_rdata);
243
244 fc_lport_enter_ready(lport);
245 }
246
247 /**
248 * fc_get_host_port_state() - Return the port state of the given Scsi_Host
249 * @shost: The SCSI host whose port state is to be determined
250 */
251 void fc_get_host_port_state(struct Scsi_Host *shost)
252 {
253 struct fc_lport *lport = shost_priv(shost);
254
255 mutex_lock(&lport->lp_mutex);
256 if (!lport->link_up)
257 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
258 else
259 switch (lport->state) {
260 case LPORT_ST_READY:
261 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
262 break;
263 default:
264 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
265 }
266 mutex_unlock(&lport->lp_mutex);
267 }
268 EXPORT_SYMBOL(fc_get_host_port_state);
269
270 /**
271 * fc_get_host_speed() - Return the speed of the given Scsi_Host
272 * @shost: The SCSI host whose port speed is to be determined
273 */
274 void fc_get_host_speed(struct Scsi_Host *shost)
275 {
276 struct fc_lport *lport = shost_priv(shost);
277
278 fc_host_speed(shost) = lport->link_speed;
279 }
280 EXPORT_SYMBOL(fc_get_host_speed);
281
282 /**
283 * fc_get_host_stats() - Return the Scsi_Host's statistics
284 * @shost: The SCSI host whose statistics are to be returned
285 */
286 struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
287 {
288 struct fc_host_statistics *fcoe_stats;
289 struct fc_lport *lport = shost_priv(shost);
290 struct timespec v0, v1;
291 unsigned int cpu;
292 u64 fcp_in_bytes = 0;
293 u64 fcp_out_bytes = 0;
294
295 fcoe_stats = &lport->host_stats;
296 memset(fcoe_stats, 0, sizeof(struct fc_host_statistics));
297
298 jiffies_to_timespec(jiffies, &v0);
299 jiffies_to_timespec(lport->boot_time, &v1);
300 fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);
301
302 for_each_possible_cpu(cpu) {
303 struct fcoe_dev_stats *stats;
304
305 stats = per_cpu_ptr(lport->dev_stats, cpu);
306
307 fcoe_stats->tx_frames += stats->TxFrames;
308 fcoe_stats->tx_words += stats->TxWords;
309 fcoe_stats->rx_frames += stats->RxFrames;
310 fcoe_stats->rx_words += stats->RxWords;
311 fcoe_stats->error_frames += stats->ErrorFrames;
312 fcoe_stats->invalid_crc_count += stats->InvalidCRCCount;
313 fcoe_stats->fcp_input_requests += stats->InputRequests;
314 fcoe_stats->fcp_output_requests += stats->OutputRequests;
315 fcoe_stats->fcp_control_requests += stats->ControlRequests;
316 fcp_in_bytes += stats->InputBytes;
317 fcp_out_bytes += stats->OutputBytes;
318 fcoe_stats->link_failure_count += stats->LinkFailureCount;
319 }
320 fcoe_stats->fcp_input_megabytes = div_u64(fcp_in_bytes, 1000000);
321 fcoe_stats->fcp_output_megabytes = div_u64(fcp_out_bytes, 1000000);
322 fcoe_stats->lip_count = -1;
323 fcoe_stats->nos_count = -1;
324 fcoe_stats->loss_of_sync_count = -1;
325 fcoe_stats->loss_of_signal_count = -1;
326 fcoe_stats->prim_seq_protocol_err_count = -1;
327 fcoe_stats->dumped_frames = -1;
328 return fcoe_stats;
329 }
330 EXPORT_SYMBOL(fc_get_host_stats);
331
332 /**
333 * fc_lport_flogi_fill() - Fill in FLOGI command for request
334 * @lport: The local port the FLOGI is for
335 * @flogi: The FLOGI command
336 * @op: The opcode
337 */
338 static void fc_lport_flogi_fill(struct fc_lport *lport,
339 struct fc_els_flogi *flogi,
340 unsigned int op)
341 {
342 struct fc_els_csp *sp;
343 struct fc_els_cssp *cp;
344
345 memset(flogi, 0, sizeof(*flogi));
346 flogi->fl_cmd = (u8) op;
347 put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
348 put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
349 sp = &flogi->fl_csp;
350 sp->sp_hi_ver = 0x20;
351 sp->sp_lo_ver = 0x20;
352 sp->sp_bb_cred = htons(10); /* this gets set by gateway */
353 sp->sp_bb_data = htons((u16) lport->mfs);
354 cp = &flogi->fl_cssp[3 - 1]; /* class 3 parameters */
355 cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
356 if (op != ELS_FLOGI) {
357 sp->sp_features = htons(FC_SP_FT_CIRO);
358 sp->sp_tot_seq = htons(255); /* seq. we accept */
359 sp->sp_rel_off = htons(0x1f);
360 sp->sp_e_d_tov = htonl(lport->e_d_tov);
361
362 cp->cp_rdfs = htons((u16) lport->mfs);
363 cp->cp_con_seq = htons(255);
364 cp->cp_open_seq = 1;
365 }
366 }
367
368 /**
369 * fc_lport_add_fc4_type() - Add a supported FC-4 type to a local port
370 * @lport: The local port to add a new FC-4 type to
371 * @type: The new FC-4 type
372 */
373 static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
374 {
375 __be32 *mp;
376
377 mp = &lport->fcts.ff_type_map[type / FC_NS_BPW];
378 *mp = htonl(ntohl(*mp) | 1UL << (type % FC_NS_BPW));
379 }
380
381 /**
382 * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report.
383 * @lport: Fibre Channel local port receiving the RLIR
384 * @fp: The RLIR request frame
385 *
386 * Locking Note: The lport lock is expected to be held before calling
387 * this function.
388 */
389 static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp)
390 {
391 FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
392 fc_lport_state(lport));
393
394 lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
395 fc_frame_free(fp);
396 }
397
398 /**
399 * fc_lport_recv_echo_req() - Handle received ECHO request
400 * @lport: The local port receiving the ECHO
401 * @in_fp: The ECHO request frame
402 *
403 * Locking Note: The lport lock is expected to be held before calling
404 * this function.
405 */
406 static void fc_lport_recv_echo_req(struct fc_lport *lport,
407 struct fc_frame *in_fp)
408 {
409 struct fc_frame *fp;
410 unsigned int len;
411 void *pp;
412 void *dp;
413
414 FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
415 fc_lport_state(lport));
416
417 len = fr_len(in_fp) - sizeof(struct fc_frame_header);
418 pp = fc_frame_payload_get(in_fp, len);
419
420 if (len < sizeof(__be32))
421 len = sizeof(__be32);
422
423 fp = fc_frame_alloc(lport, len);
424 if (fp) {
425 dp = fc_frame_payload_get(fp, len);
426 memcpy(dp, pp, len);
427 *((__be32 *)dp) = htonl(ELS_LS_ACC << 24);
428 fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
429 lport->tt.frame_send(lport, fp);
430 }
431 fc_frame_free(in_fp);
432 }
433
434 /**
435 * fc_lport_recv_rnid_req() - Handle received Request Node ID data request
436 * @lport: The local port receiving the RNID
437 * @in_fp: The RNID request frame
438 *
439 * Locking Note: The lport lock is expected to be held before calling
440 * this function.
441 */
442 static void fc_lport_recv_rnid_req(struct fc_lport *lport,
443 struct fc_frame *in_fp)
444 {
445 struct fc_frame *fp;
446 struct fc_els_rnid *req;
447 struct {
448 struct fc_els_rnid_resp rnid;
449 struct fc_els_rnid_cid cid;
450 struct fc_els_rnid_gen gen;
451 } *rp;
452 struct fc_seq_els_data rjt_data;
453 u8 fmt;
454 size_t len;
455
456 FC_LPORT_DBG(lport, "Received RNID request while in state %s\n",
457 fc_lport_state(lport));
458
459 req = fc_frame_payload_get(in_fp, sizeof(*req));
460 if (!req) {
461 rjt_data.reason = ELS_RJT_LOGIC;
462 rjt_data.explan = ELS_EXPL_NONE;
463 lport->tt.seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
464 } else {
465 fmt = req->rnid_fmt;
466 len = sizeof(*rp);
467 if (fmt != ELS_RNIDF_GEN ||
468 ntohl(lport->rnid_gen.rnid_atype) == 0) {
469 fmt = ELS_RNIDF_NONE; /* nothing to provide */
470 len -= sizeof(rp->gen);
471 }
472 fp = fc_frame_alloc(lport, len);
473 if (fp) {
474 rp = fc_frame_payload_get(fp, len);
475 memset(rp, 0, len);
476 rp->rnid.rnid_cmd = ELS_LS_ACC;
477 rp->rnid.rnid_fmt = fmt;
478 rp->rnid.rnid_cid_len = sizeof(rp->cid);
479 rp->cid.rnid_wwpn = htonll(lport->wwpn);
480 rp->cid.rnid_wwnn = htonll(lport->wwnn);
481 if (fmt == ELS_RNIDF_GEN) {
482 rp->rnid.rnid_sid_len = sizeof(rp->gen);
483 memcpy(&rp->gen, &lport->rnid_gen,
484 sizeof(rp->gen));
485 }
486 fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
487 lport->tt.frame_send(lport, fp);
488 }
489 }
490 fc_frame_free(in_fp);
491 }
492
493 /**
494 * fc_lport_recv_logo_req() - Handle received fabric LOGO request
495 * @lport: The local port receiving the LOGO
496 * @fp: The LOGO request frame
497 *
498 * Locking Note: The lport lock is expected to be held before calling
499 * this function.
500 */
501 static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
502 {
503 lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
504 fc_lport_enter_reset(lport);
505 fc_frame_free(fp);
506 }
507
508 /**
509 * fc_fabric_login() - Start the lport state machine
510 * @lport: The local port that should log into the fabric
511 *
512 * Locking Note: This function should not be called
513 * with the lport lock held.
514 */
515 int fc_fabric_login(struct fc_lport *lport)
516 {
517 int rc = -1;
518
519 mutex_lock(&lport->lp_mutex);
520 if (lport->state == LPORT_ST_DISABLED ||
521 lport->state == LPORT_ST_LOGO) {
522 fc_lport_state_enter(lport, LPORT_ST_RESET);
523 fc_lport_enter_reset(lport);
524 rc = 0;
525 }
526 mutex_unlock(&lport->lp_mutex);
527
528 return rc;
529 }
530 EXPORT_SYMBOL(fc_fabric_login);
531
532 /**
533 * __fc_linkup() - Handler for transport linkup events
534 * @lport: The lport whose link is up
535 *
536 * Locking: must be called with the lp_mutex held
537 */
538 void __fc_linkup(struct fc_lport *lport)
539 {
540 if (!lport->link_up) {
541 lport->link_up = 1;
542
543 if (lport->state == LPORT_ST_RESET)
544 fc_lport_enter_flogi(lport);
545 }
546 }
547
548 /**
549 * fc_linkup() - Handler for transport linkup events
550 * @lport: The local port whose link is up
551 */
552 void fc_linkup(struct fc_lport *lport)
553 {
554 printk(KERN_INFO "host%d: libfc: Link up on port (%6.6x)\n",
555 lport->host->host_no, lport->port_id);
556
557 mutex_lock(&lport->lp_mutex);
558 __fc_linkup(lport);
559 mutex_unlock(&lport->lp_mutex);
560 }
561 EXPORT_SYMBOL(fc_linkup);
562
563 /**
564 * __fc_linkdown() - Handler for transport linkdown events
565 * @lport: The lport whose link is down
566 *
567 * Locking: must be called with the lp_mutex held
568 */
569 void __fc_linkdown(struct fc_lport *lport)
570 {
571 if (lport->link_up) {
572 lport->link_up = 0;
573 fc_lport_enter_reset(lport);
574 lport->tt.fcp_cleanup(lport);
575 }
576 }
577
578 /**
579 * fc_linkdown() - Handler for transport linkdown events
580 * @lport: The local port whose link is down
581 */
582 void fc_linkdown(struct fc_lport *lport)
583 {
584 printk(KERN_INFO "host%d: libfc: Link down on port (%6.6x)\n",
585 lport->host->host_no, lport->port_id);
586
587 mutex_lock(&lport->lp_mutex);
588 __fc_linkdown(lport);
589 mutex_unlock(&lport->lp_mutex);
590 }
591 EXPORT_SYMBOL(fc_linkdown);
592
593 /**
594 * fc_fabric_logoff() - Logout of the fabric
595 * @lport: The local port to logoff the fabric
596 *
597 * Return value:
598 * 0 for success, -1 for failure
599 */
600 int fc_fabric_logoff(struct fc_lport *lport)
601 {
602 lport->tt.disc_stop_final(lport);
603 mutex_lock(&lport->lp_mutex);
604 if (lport->dns_rdata)
605 lport->tt.rport_logoff(lport->dns_rdata);
606 mutex_unlock(&lport->lp_mutex);
607 lport->tt.rport_flush_queue();
608 mutex_lock(&lport->lp_mutex);
609 fc_lport_enter_logo(lport);
610 mutex_unlock(&lport->lp_mutex);
611 cancel_delayed_work_sync(&lport->retry_work);
612 return 0;
613 }
614 EXPORT_SYMBOL(fc_fabric_logoff);
615
616 /**
617 * fc_lport_destroy() - Unregister a fc_lport
618 * @lport: The local port to unregister
619 *
620 * Note:
621 * exit routine for fc_lport instance
622 * clean-up all the allocated memory
623 * and free up other system resources.
624 *
625 */
626 int fc_lport_destroy(struct fc_lport *lport)
627 {
628 mutex_lock(&lport->lp_mutex);
629 lport->state = LPORT_ST_DISABLED;
630 lport->link_up = 0;
631 lport->tt.frame_send = fc_frame_drop;
632 mutex_unlock(&lport->lp_mutex);
633
634 lport->tt.fcp_abort_io(lport);
635 lport->tt.disc_stop_final(lport);
636 lport->tt.exch_mgr_reset(lport, 0, 0);
637 fc_fc4_del_lport(lport);
638 return 0;
639 }
640 EXPORT_SYMBOL(fc_lport_destroy);
641
642 /**
643 * fc_set_mfs() - Set the maximum frame size for a local port
644 * @lport: The local port to set the MFS for
645 * @mfs: The new MFS
646 */
647 int fc_set_mfs(struct fc_lport *lport, u32 mfs)
648 {
649 unsigned int old_mfs;
650 int rc = -EINVAL;
651
652 mutex_lock(&lport->lp_mutex);
653
654 old_mfs = lport->mfs;
655
656 if (mfs >= FC_MIN_MAX_FRAME) {
657 mfs &= ~3;
658 if (mfs > FC_MAX_FRAME)
659 mfs = FC_MAX_FRAME;
660 mfs -= sizeof(struct fc_frame_header);
661 lport->mfs = mfs;
662 rc = 0;
663 }
664
665 if (!rc && mfs < old_mfs)
666 fc_lport_enter_reset(lport);
667
668 mutex_unlock(&lport->lp_mutex);
669
670 return rc;
671 }
672 EXPORT_SYMBOL(fc_set_mfs);
673
674 /**
675 * fc_lport_disc_callback() - Callback for discovery events
676 * @lport: The local port receiving the event
677 * @event: The discovery event
678 */
679 void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
680 {
681 switch (event) {
682 case DISC_EV_SUCCESS:
683 FC_LPORT_DBG(lport, "Discovery succeeded\n");
684 break;
685 case DISC_EV_FAILED:
686 printk(KERN_ERR "host%d: libfc: "
687 "Discovery failed for port (%6.6x)\n",
688 lport->host->host_no, lport->port_id);
689 mutex_lock(&lport->lp_mutex);
690 fc_lport_enter_reset(lport);
691 mutex_unlock(&lport->lp_mutex);
692 break;
693 case DISC_EV_NONE:
694 WARN_ON(1);
695 break;
696 }
697 }
698
699 /**
700 * fc_lport_enter_ready() - Enter the ready state and start discovery
701 * @lport: The local port that is ready
702 *
703 * Locking Note: The lport lock is expected to be held before calling
704 * this routine.
705 */
706 static void fc_lport_enter_ready(struct fc_lport *lport)
707 {
708 FC_LPORT_DBG(lport, "Entered READY from state %s\n",
709 fc_lport_state(lport));
710
711 fc_lport_state_enter(lport, LPORT_ST_READY);
712 if (lport->vport)
713 fc_vport_set_state(lport->vport, FC_VPORT_ACTIVE);
714 fc_vports_linkchange(lport);
715
716 if (!lport->ptp_rdata)
717 lport->tt.disc_start(fc_lport_disc_callback, lport);
718 }
719
720 /**
721 * fc_lport_set_port_id() - set the local port Port ID
722 * @lport: The local port which will have its Port ID set.
723 * @port_id: The new port ID.
724 * @fp: The frame containing the incoming request, or NULL.
725 *
726 * Locking Note: The lport lock is expected to be held before calling
727 * this function.
728 */
729 static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id,
730 struct fc_frame *fp)
731 {
732 if (port_id)
733 printk(KERN_INFO "host%d: Assigned Port ID %6.6x\n",
734 lport->host->host_no, port_id);
735
736 lport->port_id = port_id;
737
738 /* Update the fc_host */
739 fc_host_port_id(lport->host) = port_id;
740
741 if (lport->tt.lport_set_port_id)
742 lport->tt.lport_set_port_id(lport, port_id, fp);
743 }
744
745 /**
746 * fc_lport_set_local_id() - set the local port Port ID for point-to-multipoint
747 * @lport: The local port which will have its Port ID set.
748 * @port_id: The new port ID.
749 *
750 * Called by the lower-level driver when transport sets the local port_id.
751 * This is used in VN_port to VN_port mode for FCoE, and causes FLOGI and
752 * discovery to be skipped.
753 */
754 void fc_lport_set_local_id(struct fc_lport *lport, u32 port_id)
755 {
756 mutex_lock(&lport->lp_mutex);
757
758 fc_lport_set_port_id(lport, port_id, NULL);
759
760 switch (lport->state) {
761 case LPORT_ST_RESET:
762 case LPORT_ST_FLOGI:
763 if (port_id)
764 fc_lport_enter_ready(lport);
765 break;
766 default:
767 break;
768 }
769 mutex_unlock(&lport->lp_mutex);
770 }
771 EXPORT_SYMBOL(fc_lport_set_local_id);
772
773 /**
774 * fc_lport_recv_flogi_req() - Receive a FLOGI request
775 * @lport: The local port that received the request
776 * @rx_fp: The FLOGI frame
777 *
778 * A received FLOGI request indicates a point-to-point connection.
779 * Accept it with the common service parameters indicating our N port.
780 * Set up to do a PLOGI if we have the higher-number WWPN.
781 *
782 * Locking Note: The lport lock is expected to be held before calling
783 * this function.
784 */
785 static void fc_lport_recv_flogi_req(struct fc_lport *lport,
786 struct fc_frame *rx_fp)
787 {
788 struct fc_frame *fp;
789 struct fc_frame_header *fh;
790 struct fc_els_flogi *flp;
791 struct fc_els_flogi *new_flp;
792 u64 remote_wwpn;
793 u32 remote_fid;
794 u32 local_fid;
795
796 FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n",
797 fc_lport_state(lport));
798
799 remote_fid = fc_frame_sid(rx_fp);
800 flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
801 if (!flp)
802 goto out;
803 remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
804 if (remote_wwpn == lport->wwpn) {
805 printk(KERN_WARNING "host%d: libfc: Received FLOGI from port "
806 "with same WWPN %16.16llx\n",
807 lport->host->host_no, remote_wwpn);
808 goto out;
809 }
810 FC_LPORT_DBG(lport, "FLOGI from port WWPN %16.16llx\n", remote_wwpn);
811
812 /*
813 * XXX what is the right thing to do for FIDs?
814 * The originator might expect our S_ID to be 0xfffffe.
815 * But if so, both of us could end up with the same FID.
816 */
817 local_fid = FC_LOCAL_PTP_FID_LO;
818 if (remote_wwpn < lport->wwpn) {
819 local_fid = FC_LOCAL_PTP_FID_HI;
820 if (!remote_fid || remote_fid == local_fid)
821 remote_fid = FC_LOCAL_PTP_FID_LO;
822 } else if (!remote_fid) {
823 remote_fid = FC_LOCAL_PTP_FID_HI;
824 }
825
826 fc_lport_set_port_id(lport, local_fid, rx_fp);
827
828 fp = fc_frame_alloc(lport, sizeof(*flp));
829 if (fp) {
830 new_flp = fc_frame_payload_get(fp, sizeof(*flp));
831 fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI);
832 new_flp->fl_cmd = (u8) ELS_LS_ACC;
833
834 /*
835 * Send the response. If this fails, the originator should
836 * repeat the sequence.
837 */
838 fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
839 fh = fc_frame_header_get(fp);
840 hton24(fh->fh_s_id, local_fid);
841 hton24(fh->fh_d_id, remote_fid);
842 lport->tt.frame_send(lport, fp);
843
844 } else {
845 fc_lport_error(lport, fp);
846 }
847 fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
848 get_unaligned_be64(&flp->fl_wwnn));
849 out:
850 fc_frame_free(rx_fp);
851 }
852
853 /**
854 * fc_lport_recv_els_req() - The generic lport ELS request handler
855 * @lport: The local port that received the request
856 * @fp: The request frame
857 *
858 * This function will see if the lport handles the request or
859 * if an rport should handle the request.
860 *
861 * Locking Note: This function should not be called with the lport
862 * lock held because it will grab the lock.
863 */
864 static void fc_lport_recv_els_req(struct fc_lport *lport,
865 struct fc_frame *fp)
866 {
867 void (*recv)(struct fc_lport *, struct fc_frame *);
868
869 mutex_lock(&lport->lp_mutex);
870
871 /*
872 * Handle special ELS cases like FLOGI, LOGO, and
873 * RSCN here. These don't require a session.
874 * Even if we had a session, it might not be ready.
875 */
876 if (!lport->link_up)
877 fc_frame_free(fp);
878 else {
879 /*
880 * Check opcode.
881 */
882 recv = lport->tt.rport_recv_req;
883 switch (fc_frame_payload_op(fp)) {
884 case ELS_FLOGI:
885 if (!lport->point_to_multipoint)
886 recv = fc_lport_recv_flogi_req;
887 break;
888 case ELS_LOGO:
889 if (fc_frame_sid(fp) == FC_FID_FLOGI)
890 recv = fc_lport_recv_logo_req;
891 break;
892 case ELS_RSCN:
893 recv = lport->tt.disc_recv_req;
894 break;
895 case ELS_ECHO:
896 recv = fc_lport_recv_echo_req;
897 break;
898 case ELS_RLIR:
899 recv = fc_lport_recv_rlir_req;
900 break;
901 case ELS_RNID:
902 recv = fc_lport_recv_rnid_req;
903 break;
904 }
905
906 recv(lport, fp);
907 }
908 mutex_unlock(&lport->lp_mutex);
909 }
910
911 static int fc_lport_els_prli(struct fc_rport_priv *rdata, u32 spp_len,
912 const struct fc_els_spp *spp_in,
913 struct fc_els_spp *spp_out)
914 {
915 return FC_SPP_RESP_INVL;
916 }
917
918 struct fc4_prov fc_lport_els_prov = {
919 .prli = fc_lport_els_prli,
920 .recv = fc_lport_recv_els_req,
921 };
922
923 /**
924 * fc_lport_recv_req() - The generic lport request handler
925 * @lport: The lport that received the request
926 * @fp: The frame the request is in
927 *
928 * Locking Note: This function should not be called with the lport
929 * lock held because it may grab the lock.
930 */
931 static void fc_lport_recv_req(struct fc_lport *lport,
932 struct fc_frame *fp)
933 {
934 struct fc_frame_header *fh = fc_frame_header_get(fp);
935 struct fc_seq *sp = fr_seq(fp);
936 struct fc4_prov *prov;
937
938 /*
939 * Use RCU read lock and module_lock to be sure module doesn't
940 * deregister and get unloaded while we're calling it.
941 * try_module_get() is inlined and accepts a NULL parameter.
942 * Only ELSes and FCP target ops should come through here.
943 * The locking is unfortunate, and a better scheme is being sought.
944 */
945
946 rcu_read_lock();
947 if (fh->fh_type >= FC_FC4_PROV_SIZE)
948 goto drop;
949 prov = rcu_dereference(fc_passive_prov[fh->fh_type]);
950 if (!prov || !try_module_get(prov->module))
951 goto drop;
952 rcu_read_unlock();
953 prov->recv(lport, fp);
954 module_put(prov->module);
955 return;
956 drop:
957 rcu_read_unlock();
958 FC_LPORT_DBG(lport, "dropping unexpected frame type %x\n", fh->fh_type);
959 fc_frame_free(fp);
960 lport->tt.exch_done(sp);
961 }
962
963 /**
964 * fc_lport_reset() - Reset a local port
965 * @lport: The local port which should be reset
966 *
967 * Locking Note: This function should not be called with the
968 * lport lock held.
969 */
970 int fc_lport_reset(struct fc_lport *lport)
971 {
972 cancel_delayed_work_sync(&lport->retry_work);
973 mutex_lock(&lport->lp_mutex);
974 fc_lport_enter_reset(lport);
975 mutex_unlock(&lport->lp_mutex);
976 return 0;
977 }
978 EXPORT_SYMBOL(fc_lport_reset);
979
980 /**
981 * fc_lport_reset_locked() - Reset the local port w/ the lport lock held
982 * @lport: The local port to be reset
983 *
984 * Locking Note: The lport lock is expected to be held before calling
985 * this routine.
986 */
987 static void fc_lport_reset_locked(struct fc_lport *lport)
988 {
989 if (lport->dns_rdata)
990 lport->tt.rport_logoff(lport->dns_rdata);
991
992 if (lport->ptp_rdata) {
993 lport->tt.rport_logoff(lport->ptp_rdata);
994 kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
995 lport->ptp_rdata = NULL;
996 }
997
998 lport->tt.disc_stop(lport);
999
1000 lport->tt.exch_mgr_reset(lport, 0, 0);
1001 fc_host_fabric_name(lport->host) = 0;
1002
1003 if (lport->port_id && (!lport->point_to_multipoint || !lport->link_up))
1004 fc_lport_set_port_id(lport, 0, NULL);
1005 }
1006
1007 /**
1008 * fc_lport_enter_reset() - Reset the local port
1009 * @lport: The local port to be reset
1010 *
1011 * Locking Note: The lport lock is expected to be held before calling
1012 * this routine.
1013 */
1014 static void fc_lport_enter_reset(struct fc_lport *lport)
1015 {
1016 FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
1017 fc_lport_state(lport));
1018
1019 if (lport->state == LPORT_ST_DISABLED || lport->state == LPORT_ST_LOGO)
1020 return;
1021
1022 if (lport->vport) {
1023 if (lport->link_up)
1024 fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING);
1025 else
1026 fc_vport_set_state(lport->vport, FC_VPORT_LINKDOWN);
1027 }
1028 fc_lport_state_enter(lport, LPORT_ST_RESET);
1029 fc_host_post_event(lport->host, fc_get_event_number(),
1030 FCH_EVT_LIPRESET, 0);
1031 fc_vports_linkchange(lport);
1032 fc_lport_reset_locked(lport);
1033 if (lport->link_up) {
1034 /*
1035 * Wait up to the resource allocation timeout before
1036 * doing re-login, since incomplete FIP exchanges
1037 * from the last session may collide with exchanges
1038 * in the new session.
1039 */
1040 msleep(lport->r_a_tov);
1041 fc_lport_enter_flogi(lport);
1042 }
1043 }
1044
1045 /**
1046 * fc_lport_enter_disabled() - Disable the local port
1047 * @lport: The local port to be disabled
1048 *
1049 * Locking Note: The lport lock is expected to be held before calling
1050 * this routine.
1051 */
1052 static void fc_lport_enter_disabled(struct fc_lport *lport)
1053 {
1054 FC_LPORT_DBG(lport, "Entered disabled state from %s state\n",
1055 fc_lport_state(lport));
1056
1057 fc_lport_state_enter(lport, LPORT_ST_DISABLED);
1058 fc_vports_linkchange(lport);
1059 fc_lport_reset_locked(lport);
1060 }
1061
1062 /**
1063 * fc_lport_error() - Handler for any errors
1064 * @lport: The local port that the error was on
1065 * @fp: The error code encoded in a frame pointer
1066 *
1067 * If the error was caused by a resource allocation failure
1068 * then wait for half a second and retry, otherwise retry
1069 * after the e_d_tov time.
1070 */
1071 static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
1072 {
1073 unsigned long delay = 0;
1074 FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n",
1075 PTR_ERR(fp), fc_lport_state(lport),
1076 lport->retry_count);
1077
1078 if (PTR_ERR(fp) == -FC_EX_CLOSED)
1079 return;
1080
1081 /*
1082 * Memory allocation failure, or the exchange timed out
1083 * or we received LS_RJT.
1084 * Retry after delay
1085 */
1086 if (lport->retry_count < lport->max_retry_count) {
1087 lport->retry_count++;
1088 if (!fp)
1089 delay = msecs_to_jiffies(500);
1090 else
1091 delay = msecs_to_jiffies(lport->e_d_tov);
1092
1093 schedule_delayed_work(&lport->retry_work, delay);
1094 } else
1095 fc_lport_enter_reset(lport);
1096 }
1097
1098 /**
1099 * fc_lport_ns_resp() - Handle response to a name server
1100 * registration exchange
1101 * @sp: current sequence in exchange
1102 * @fp: response frame
1103 * @lp_arg: Fibre Channel host port instance
1104 *
1105 * Locking Note: This function will be called without the lport lock
1106 * held, but it will lock, call an _enter_* function or fc_lport_error()
1107 * and then unlock the lport.
1108 */
1109 static void fc_lport_ns_resp(struct fc_seq *sp, struct fc_frame *fp,
1110 void *lp_arg)
1111 {
1112 struct fc_lport *lport = lp_arg;
1113 struct fc_frame_header *fh;
1114 struct fc_ct_hdr *ct;
1115
1116 FC_LPORT_DBG(lport, "Received a ns %s\n", fc_els_resp_type(fp));
1117
1118 if (fp == ERR_PTR(-FC_EX_CLOSED))
1119 return;
1120
1121 mutex_lock(&lport->lp_mutex);
1122
1123 if (lport->state < LPORT_ST_RNN_ID || lport->state > LPORT_ST_RFF_ID) {
1124 FC_LPORT_DBG(lport, "Received a name server response, "
1125 "but in state %s\n", fc_lport_state(lport));
1126 if (IS_ERR(fp))
1127 goto err;
1128 goto out;
1129 }
1130
1131 if (IS_ERR(fp)) {
1132 fc_lport_error(lport, fp);
1133 goto err;
1134 }
1135
1136 fh = fc_frame_header_get(fp);
1137 ct = fc_frame_payload_get(fp, sizeof(*ct));
1138
1139 if (fh && ct && fh->fh_type == FC_TYPE_CT &&
1140 ct->ct_fs_type == FC_FST_DIR &&
1141 ct->ct_fs_subtype == FC_NS_SUBTYPE &&
1142 ntohs(ct->ct_cmd) == FC_FS_ACC)
1143 switch (lport->state) {
1144 case LPORT_ST_RNN_ID:
1145 fc_lport_enter_ns(lport, LPORT_ST_RSNN_NN);
1146 break;
1147 case LPORT_ST_RSNN_NN:
1148 fc_lport_enter_ns(lport, LPORT_ST_RSPN_ID);
1149 break;
1150 case LPORT_ST_RSPN_ID:
1151 fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
1152 break;
1153 case LPORT_ST_RFT_ID:
1154 fc_lport_enter_ns(lport, LPORT_ST_RFF_ID);
1155 break;
1156 case LPORT_ST_RFF_ID:
1157 fc_lport_enter_scr(lport);
1158 break;
1159 default:
1160 /* should have already been caught by state checks */
1161 break;
1162 }
1163 else
1164 fc_lport_error(lport, fp);
1165 out:
1166 fc_frame_free(fp);
1167 err:
1168 mutex_unlock(&lport->lp_mutex);
1169 }
1170
1171 /**
1172 * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request
1173 * @sp: current sequence in SCR exchange
1174 * @fp: response frame
1175 * @lp_arg: Fibre Channel local port instance that sent the SCR request
1176 *
1177 * Locking Note: This function will be called without the lport lock
1178 * held, but it will lock, call an _enter_* function or fc_lport_error
1179 * and then unlock the lport.
1180 */
1181 static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
1182 void *lp_arg)
1183 {
1184 struct fc_lport *lport = lp_arg;
1185 u8 op;
1186
1187 FC_LPORT_DBG(lport, "Received a SCR %s\n", fc_els_resp_type(fp));
1188
1189 if (fp == ERR_PTR(-FC_EX_CLOSED))
1190 return;
1191
1192 mutex_lock(&lport->lp_mutex);
1193
1194 if (lport->state != LPORT_ST_SCR) {
1195 FC_LPORT_DBG(lport, "Received a SCR response, but in state "
1196 "%s\n", fc_lport_state(lport));
1197 if (IS_ERR(fp))
1198 goto err;
1199 goto out;
1200 }
1201
1202 if (IS_ERR(fp)) {
1203 fc_lport_error(lport, fp);
1204 goto err;
1205 }
1206
1207 op = fc_frame_payload_op(fp);
1208 if (op == ELS_LS_ACC)
1209 fc_lport_enter_ready(lport);
1210 else
1211 fc_lport_error(lport, fp);
1212
1213 out:
1214 fc_frame_free(fp);
1215 err:
1216 mutex_unlock(&lport->lp_mutex);
1217 }
1218
1219 /**
1220 * fc_lport_enter_scr() - Send a SCR (State Change Register) request
1221 * @lport: The local port to register for state changes
1222 *
1223 * Locking Note: The lport lock is expected to be held before calling
1224 * this routine.
1225 */
1226 static void fc_lport_enter_scr(struct fc_lport *lport)
1227 {
1228 struct fc_frame *fp;
1229
1230 FC_LPORT_DBG(lport, "Entered SCR state from %s state\n",
1231 fc_lport_state(lport));
1232
1233 fc_lport_state_enter(lport, LPORT_ST_SCR);
1234
1235 fp = fc_frame_alloc(lport, sizeof(struct fc_els_scr));
1236 if (!fp) {
1237 fc_lport_error(lport, fp);
1238 return;
1239 }
1240
1241 if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR,
1242 fc_lport_scr_resp, lport,
1243 2 * lport->r_a_tov))
1244 fc_lport_error(lport, NULL);
1245 }
1246
1247 /**
1248 * fc_lport_enter_ns() - register some object with the name server
1249 * @lport: Fibre Channel local port to register
1250 *
1251 * Locking Note: The lport lock is expected to be held before calling
1252 * this routine.
1253 */
1254 static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state)
1255 {
1256 struct fc_frame *fp;
1257 enum fc_ns_req cmd;
1258 int size = sizeof(struct fc_ct_hdr);
1259 size_t len;
1260
1261 FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
1262 fc_lport_state_names[state],
1263 fc_lport_state(lport));
1264
1265 fc_lport_state_enter(lport, state);
1266
1267 switch (state) {
1268 case LPORT_ST_RNN_ID:
1269 cmd = FC_NS_RNN_ID;
1270 size += sizeof(struct fc_ns_rn_id);
1271 break;
1272 case LPORT_ST_RSNN_NN:
1273 len = strnlen(fc_host_symbolic_name(lport->host), 255);
1274 /* if there is no symbolic name, skip to RFT_ID */
1275 if (!len)
1276 return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
1277 cmd = FC_NS_RSNN_NN;
1278 size += sizeof(struct fc_ns_rsnn) + len;
1279 break;
1280 case LPORT_ST_RSPN_ID:
1281 len = strnlen(fc_host_symbolic_name(lport->host), 255);
1282 /* if there is no symbolic name, skip to RFT_ID */
1283 if (!len)
1284 return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
1285 cmd = FC_NS_RSPN_ID;
1286 size += sizeof(struct fc_ns_rspn) + len;
1287 break;
1288 case LPORT_ST_RFT_ID:
1289 cmd = FC_NS_RFT_ID;
1290 size += sizeof(struct fc_ns_rft);
1291 break;
1292 case LPORT_ST_RFF_ID:
1293 cmd = FC_NS_RFF_ID;
1294 size += sizeof(struct fc_ns_rff_id);
1295 break;
1296 default:
1297 fc_lport_error(lport, NULL);
1298 return;
1299 }
1300
1301 fp = fc_frame_alloc(lport, size);
1302 if (!fp) {
1303 fc_lport_error(lport, fp);
1304 return;
1305 }
1306
1307 if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, cmd,
1308 fc_lport_ns_resp,
1309 lport, 3 * lport->r_a_tov))
1310 fc_lport_error(lport, fp);
1311 }
1312
1313 static struct fc_rport_operations fc_lport_rport_ops = {
1314 .event_callback = fc_lport_rport_callback,
1315 };
1316
1317 /**
1318 * fc_lport_enter_dns() - Create a fc_rport for the name server
1319 * @lport: The local port requesting a remote port for the name server
1320 *
1321 * Locking Note: The lport lock is expected to be held before calling
1322 * this routine.
1323 */
1324 static void fc_lport_enter_dns(struct fc_lport *lport)
1325 {
1326 struct fc_rport_priv *rdata;
1327
1328 FC_LPORT_DBG(lport, "Entered DNS state from %s state\n",
1329 fc_lport_state(lport));
1330
1331 fc_lport_state_enter(lport, LPORT_ST_DNS);
1332
1333 mutex_lock(&lport->disc.disc_mutex);
1334 rdata = lport->tt.rport_create(lport, FC_FID_DIR_SERV);
1335 mutex_unlock(&lport->disc.disc_mutex);
1336 if (!rdata)
1337 goto err;
1338
1339 rdata->ops = &fc_lport_rport_ops;
1340 lport->tt.rport_login(rdata);
1341 return;
1342
1343 err:
1344 fc_lport_error(lport, NULL);
1345 }
1346
1347 /**
1348 * fc_lport_timeout() - Handler for the retry_work timer
1349 * @work: The work struct of the local port
1350 */
1351 static void fc_lport_timeout(struct work_struct *work)
1352 {
1353 struct fc_lport *lport =
1354 container_of(work, struct fc_lport,
1355 retry_work.work);
1356
1357 mutex_lock(&lport->lp_mutex);
1358
1359 switch (lport->state) {
1360 case LPORT_ST_DISABLED:
1361 WARN_ON(1);
1362 break;
1363 case LPORT_ST_READY:
1364 break;
1365 case LPORT_ST_RESET:
1366 break;
1367 case LPORT_ST_FLOGI:
1368 fc_lport_enter_flogi(lport);
1369 break;
1370 case LPORT_ST_DNS:
1371 fc_lport_enter_dns(lport);
1372 break;
1373 case LPORT_ST_RNN_ID:
1374 case LPORT_ST_RSNN_NN:
1375 case LPORT_ST_RSPN_ID:
1376 case LPORT_ST_RFT_ID:
1377 case LPORT_ST_RFF_ID:
1378 fc_lport_enter_ns(lport, lport->state);
1379 break;
1380 case LPORT_ST_SCR:
1381 fc_lport_enter_scr(lport);
1382 break;
1383 case LPORT_ST_LOGO:
1384 fc_lport_enter_logo(lport);
1385 break;
1386 }
1387
1388 mutex_unlock(&lport->lp_mutex);
1389 }
1390
1391 /**
1392 * fc_lport_logo_resp() - Handle response to LOGO request
1393 * @sp: The sequence that the LOGO was on
1394 * @fp: The LOGO frame
1395 * @lp_arg: The local port that received the LOGO response
1396 *
1397 * Locking Note: This function will be called without the lport lock
1398 * held, but it will lock, call an _enter_* function or fc_lport_error()
1399 * and then unlock the lport.
1400 */
1401 void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
1402 void *lp_arg)
1403 {
1404 struct fc_lport *lport = lp_arg;
1405 u8 op;
1406
1407 FC_LPORT_DBG(lport, "Received a LOGO %s\n", fc_els_resp_type(fp));
1408
1409 if (fp == ERR_PTR(-FC_EX_CLOSED))
1410 return;
1411
1412 mutex_lock(&lport->lp_mutex);
1413
1414 if (lport->state != LPORT_ST_LOGO) {
1415 FC_LPORT_DBG(lport, "Received a LOGO response, but in state "
1416 "%s\n", fc_lport_state(lport));
1417 if (IS_ERR(fp))
1418 goto err;
1419 goto out;
1420 }
1421
1422 if (IS_ERR(fp)) {
1423 fc_lport_error(lport, fp);
1424 goto err;
1425 }
1426
1427 op = fc_frame_payload_op(fp);
1428 if (op == ELS_LS_ACC)
1429 fc_lport_enter_disabled(lport);
1430 else
1431 fc_lport_error(lport, fp);
1432
1433 out:
1434 fc_frame_free(fp);
1435 err:
1436 mutex_unlock(&lport->lp_mutex);
1437 }
1438 EXPORT_SYMBOL(fc_lport_logo_resp);
1439
1440 /**
1441 * fc_lport_enter_logo() - Logout of the fabric
1442 * @lport: The local port to be logged out
1443 *
1444 * Locking Note: The lport lock is expected to be held before calling
1445 * this routine.
1446 */
1447 static void fc_lport_enter_logo(struct fc_lport *lport)
1448 {
1449 struct fc_frame *fp;
1450 struct fc_els_logo *logo;
1451
1452 FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n",
1453 fc_lport_state(lport));
1454
1455 fc_lport_state_enter(lport, LPORT_ST_LOGO);
1456 fc_vports_linkchange(lport);
1457
1458 fp = fc_frame_alloc(lport, sizeof(*logo));
1459 if (!fp) {
1460 fc_lport_error(lport, fp);
1461 return;
1462 }
1463
1464 if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO,
1465 fc_lport_logo_resp, lport,
1466 2 * lport->r_a_tov))
1467 fc_lport_error(lport, NULL);
1468 }
1469
1470 /**
1471 * fc_lport_flogi_resp() - Handle response to FLOGI request
1472 * @sp: The sequence that the FLOGI was on
1473 * @fp: The FLOGI response frame
1474 * @lp_arg: The local port that received the FLOGI response
1475 *
1476 * Locking Note: This function will be called without the lport lock
1477 * held, but it will lock, call an _enter_* function or fc_lport_error()
1478 * and then unlock the lport.
1479 */
1480 void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1481 void *lp_arg)
1482 {
1483 struct fc_lport *lport = lp_arg;
1484 struct fc_els_flogi *flp;
1485 u32 did;
1486 u16 csp_flags;
1487 unsigned int r_a_tov;
1488 unsigned int e_d_tov;
1489 u16 mfs;
1490
1491 FC_LPORT_DBG(lport, "Received a FLOGI %s\n", fc_els_resp_type(fp));
1492
1493 if (fp == ERR_PTR(-FC_EX_CLOSED))
1494 return;
1495
1496 mutex_lock(&lport->lp_mutex);
1497
1498 if (lport->state != LPORT_ST_FLOGI) {
1499 FC_LPORT_DBG(lport, "Received a FLOGI response, but in state "
1500 "%s\n", fc_lport_state(lport));
1501 if (IS_ERR(fp))
1502 goto err;
1503 goto out;
1504 }
1505
1506 if (IS_ERR(fp)) {
1507 fc_lport_error(lport, fp);
1508 goto err;
1509 }
1510
1511 did = fc_frame_did(fp);
1512 if (fc_frame_payload_op(fp) == ELS_LS_ACC && did) {
1513 flp = fc_frame_payload_get(fp, sizeof(*flp));
1514 if (flp) {
1515 mfs = ntohs(flp->fl_csp.sp_bb_data) &
1516 FC_SP_BB_DATA_MASK;
1517 if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
1518 mfs < lport->mfs)
1519 lport->mfs = mfs;
1520 csp_flags = ntohs(flp->fl_csp.sp_features);
1521 r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
1522 e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
1523 if (csp_flags & FC_SP_FT_EDTR)
1524 e_d_tov /= 1000000;
1525
1526 lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC);
1527
1528 if ((csp_flags & FC_SP_FT_FPORT) == 0) {
1529 if (e_d_tov > lport->e_d_tov)
1530 lport->e_d_tov = e_d_tov;
1531 lport->r_a_tov = 2 * e_d_tov;
1532 fc_lport_set_port_id(lport, did, fp);
1533 printk(KERN_INFO "host%d: libfc: "
1534 "Port (%6.6x) entered "
1535 "point-to-point mode\n",
1536 lport->host->host_no, did);
1537 fc_lport_ptp_setup(lport, fc_frame_sid(fp),
1538 get_unaligned_be64(
1539 &flp->fl_wwpn),
1540 get_unaligned_be64(
1541 &flp->fl_wwnn));
1542 } else {
1543 lport->e_d_tov = e_d_tov;
1544 lport->r_a_tov = r_a_tov;
1545 fc_host_fabric_name(lport->host) =
1546 get_unaligned_be64(&flp->fl_wwnn);
1547 fc_lport_set_port_id(lport, did, fp);
1548 fc_lport_enter_dns(lport);
1549 }
1550 }
1551 } else {
1552 FC_LPORT_DBG(lport, "FLOGI RJT or bad response\n");
1553 fc_lport_error(lport, fp);
1554 }
1555
1556 out:
1557 fc_frame_free(fp);
1558 err:
1559 mutex_unlock(&lport->lp_mutex);
1560 }
1561 EXPORT_SYMBOL(fc_lport_flogi_resp);
1562
1563 /**
1564 * fc_lport_enter_flogi() - Send a FLOGI request to the fabric manager
1565 * @lport: Fibre Channel local port to be logged in to the fabric
1566 *
1567 * Locking Note: The lport lock is expected to be held before calling
1568 * this routine.
1569 */
1570 void fc_lport_enter_flogi(struct fc_lport *lport)
1571 {
1572 struct fc_frame *fp;
1573
1574 FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n",
1575 fc_lport_state(lport));
1576
1577 fc_lport_state_enter(lport, LPORT_ST_FLOGI);
1578
1579 if (lport->point_to_multipoint) {
1580 if (lport->port_id)
1581 fc_lport_enter_ready(lport);
1582 return;
1583 }
1584
1585 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
1586 if (!fp)
1587 return fc_lport_error(lport, fp);
1588
1589 if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp,
1590 lport->vport ? ELS_FDISC : ELS_FLOGI,
1591 fc_lport_flogi_resp, lport,
1592 lport->vport ? 2 * lport->r_a_tov :
1593 lport->e_d_tov))
1594 fc_lport_error(lport, NULL);
1595 }
1596
1597 /**
1598 * fc_lport_config() - Configure a fc_lport
1599 * @lport: The local port to be configured
1600 */
1601 int fc_lport_config(struct fc_lport *lport)
1602 {
1603 INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
1604 mutex_init(&lport->lp_mutex);
1605
1606 fc_lport_state_enter(lport, LPORT_ST_DISABLED);
1607
1608 fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
1609 fc_lport_add_fc4_type(lport, FC_TYPE_CT);
1610 fc_fc4_conf_lport_params(lport, FC_TYPE_FCP);
1611
1612 return 0;
1613 }
1614 EXPORT_SYMBOL(fc_lport_config);
1615
1616 /**
1617 * fc_lport_init() - Initialize the lport layer for a local port
1618 * @lport: The local port to initialize the exchange layer for
1619 */
1620 int fc_lport_init(struct fc_lport *lport)
1621 {
1622 if (!lport->tt.lport_recv)
1623 lport->tt.lport_recv = fc_lport_recv_req;
1624
1625 if (!lport->tt.lport_reset)
1626 lport->tt.lport_reset = fc_lport_reset;
1627
1628 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
1629 fc_host_node_name(lport->host) = lport->wwnn;
1630 fc_host_port_name(lport->host) = lport->wwpn;
1631 fc_host_supported_classes(lport->host) = FC_COS_CLASS3;
1632 memset(fc_host_supported_fc4s(lport->host), 0,
1633 sizeof(fc_host_supported_fc4s(lport->host)));
1634 fc_host_supported_fc4s(lport->host)[2] = 1;
1635 fc_host_supported_fc4s(lport->host)[7] = 1;
1636
1637 /* This value is also unchanging */
1638 memset(fc_host_active_fc4s(lport->host), 0,
1639 sizeof(fc_host_active_fc4s(lport->host)));
1640 fc_host_active_fc4s(lport->host)[2] = 1;
1641 fc_host_active_fc4s(lport->host)[7] = 1;
1642 fc_host_maxframe_size(lport->host) = lport->mfs;
1643 fc_host_supported_speeds(lport->host) = 0;
1644 if (lport->link_supported_speeds & FC_PORTSPEED_1GBIT)
1645 fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
1646 if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
1647 fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
1648 fc_fc4_add_lport(lport);
1649
1650 return 0;
1651 }
1652 EXPORT_SYMBOL(fc_lport_init);
1653
1654 /**
1655 * fc_lport_bsg_resp() - The common response handler for FC Passthrough requests
1656 * @sp: The sequence for the FC Passthrough response
1657 * @fp: The response frame
1658 * @info_arg: The BSG info that the response is for
1659 */
1660 static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
1661 void *info_arg)
1662 {
1663 struct fc_bsg_info *info = info_arg;
1664 struct fc_bsg_job *job = info->job;
1665 struct fc_lport *lport = info->lport;
1666 struct fc_frame_header *fh;
1667 size_t len;
1668 void *buf;
1669
1670 if (IS_ERR(fp)) {
1671 job->reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ?
1672 -ECONNABORTED : -ETIMEDOUT;
1673 job->reply_len = sizeof(uint32_t);
1674 job->state_flags |= FC_RQST_STATE_DONE;
1675 job->job_done(job);
1676 kfree(info);
1677 return;
1678 }
1679
1680 mutex_lock(&lport->lp_mutex);
1681 fh = fc_frame_header_get(fp);
1682 len = fr_len(fp) - sizeof(*fh);
1683 buf = fc_frame_payload_get(fp, 0);
1684
1685 if (fr_sof(fp) == FC_SOF_I3 && !ntohs(fh->fh_seq_cnt)) {
1686 /* Get the response code from the first frame payload */
1687 unsigned short cmd = (info->rsp_code == FC_FS_ACC) ?
1688 ntohs(((struct fc_ct_hdr *)buf)->ct_cmd) :
1689 (unsigned short)fc_frame_payload_op(fp);
1690
1691 /* Save the reply status of the job */
1692 job->reply->reply_data.ctels_reply.status =
1693 (cmd == info->rsp_code) ?
1694 FC_CTELS_STATUS_OK : FC_CTELS_STATUS_REJECT;
1695 }
1696
1697 job->reply->reply_payload_rcv_len +=
1698 fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents,
1699 &info->offset, KM_BIO_SRC_IRQ, NULL);
1700
1701 if (fr_eof(fp) == FC_EOF_T &&
1702 (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
1703 (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
1704 if (job->reply->reply_payload_rcv_len >
1705 job->reply_payload.payload_len)
1706 job->reply->reply_payload_rcv_len =
1707 job->reply_payload.payload_len;
1708 job->reply->result = 0;
1709 job->state_flags |= FC_RQST_STATE_DONE;
1710 job->job_done(job);
1711 kfree(info);
1712 }
1713 fc_frame_free(fp);
1714 mutex_unlock(&lport->lp_mutex);
1715 }
1716
1717 /**
1718 * fc_lport_els_request() - Send ELS passthrough request
1719 * @job: The BSG Passthrough job
1720 * @lport: The local port sending the request
1721 * @did: The destination port id
1722 *
1723 * Locking Note: The lport lock is expected to be held before calling
1724 * this routine.
1725 */
1726 static int fc_lport_els_request(struct fc_bsg_job *job,
1727 struct fc_lport *lport,
1728 u32 did, u32 tov)
1729 {
1730 struct fc_bsg_info *info;
1731 struct fc_frame *fp;
1732 struct fc_frame_header *fh;
1733 char *pp;
1734 int len;
1735
1736 fp = fc_frame_alloc(lport, job->request_payload.payload_len);
1737 if (!fp)
1738 return -ENOMEM;
1739
1740 len = job->request_payload.payload_len;
1741 pp = fc_frame_payload_get(fp, len);
1742
1743 sg_copy_to_buffer(job->request_payload.sg_list,
1744 job->request_payload.sg_cnt,
1745 pp, len);
1746
1747 fh = fc_frame_header_get(fp);
1748 fh->fh_r_ctl = FC_RCTL_ELS_REQ;
1749 hton24(fh->fh_d_id, did);
1750 hton24(fh->fh_s_id, lport->port_id);
1751 fh->fh_type = FC_TYPE_ELS;
1752 hton24(fh->fh_f_ctl, FC_FCTL_REQ);
1753 fh->fh_cs_ctl = 0;
1754 fh->fh_df_ctl = 0;
1755 fh->fh_parm_offset = 0;
1756
1757 info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
1758 if (!info) {
1759 fc_frame_free(fp);
1760 return -ENOMEM;
1761 }
1762
1763 info->job = job;
1764 info->lport = lport;
1765 info->rsp_code = ELS_LS_ACC;
1766 info->nents = job->reply_payload.sg_cnt;
1767 info->sg = job->reply_payload.sg_list;
1768
1769 if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
1770 NULL, info, tov)) {
1771 kfree(info);
1772 return -ECOMM;
1773 }
1774 return 0;
1775 }
1776
1777 /**
1778 * fc_lport_ct_request() - Send CT Passthrough request
1779 * @job: The BSG Passthrough job
1780 * @lport: The local port sending the request
1781 * @did: The destination FC-ID
1782 * @tov: The timeout period to wait for the response
1783 *
1784 * Locking Note: The lport lock is expected to be held before calling
1785 * this routine.
1786 */
1787 static int fc_lport_ct_request(struct fc_bsg_job *job,
1788 struct fc_lport *lport, u32 did, u32 tov)
1789 {
1790 struct fc_bsg_info *info;
1791 struct fc_frame *fp;
1792 struct fc_frame_header *fh;
1793 struct fc_ct_req *ct;
1794 size_t len;
1795
1796 fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
1797 job->request_payload.payload_len);
1798 if (!fp)
1799 return -ENOMEM;
1800
1801 len = job->request_payload.payload_len;
1802 ct = fc_frame_payload_get(fp, len);
1803
1804 sg_copy_to_buffer(job->request_payload.sg_list,
1805 job->request_payload.sg_cnt,
1806 ct, len);
1807
1808 fh = fc_frame_header_get(fp);
1809 fh->fh_r_ctl = FC_RCTL_DD_UNSOL_CTL;
1810 hton24(fh->fh_d_id, did);
1811 hton24(fh->fh_s_id, lport->port_id);
1812 fh->fh_type = FC_TYPE_CT;
1813 hton24(fh->fh_f_ctl, FC_FCTL_REQ);
1814 fh->fh_cs_ctl = 0;
1815 fh->fh_df_ctl = 0;
1816 fh->fh_parm_offset = 0;
1817
1818 info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
1819 if (!info) {
1820 fc_frame_free(fp);
1821 return -ENOMEM;
1822 }
1823
1824 info->job = job;
1825 info->lport = lport;
1826 info->rsp_code = FC_FS_ACC;
1827 info->nents = job->reply_payload.sg_cnt;
1828 info->sg = job->reply_payload.sg_list;
1829
1830 if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
1831 NULL, info, tov)) {
1832 kfree(info);
1833 return -ECOMM;
1834 }
1835 return 0;
1836 }
1837
1838 /**
1839 * fc_lport_bsg_request() - The common entry point for sending
1840 * FC Passthrough requests
1841 * @job: The BSG passthrough job
1842 */
1843 int fc_lport_bsg_request(struct fc_bsg_job *job)
1844 {
1845 struct request *rsp = job->req->next_rq;
1846 struct Scsi_Host *shost = job->shost;
1847 struct fc_lport *lport = shost_priv(shost);
1848 struct fc_rport *rport;
1849 struct fc_rport_priv *rdata;
1850 int rc = -EINVAL;
1851 u32 did;
1852
1853 job->reply->reply_payload_rcv_len = 0;
1854 if (rsp)
1855 rsp->resid_len = job->reply_payload.payload_len;
1856
1857 mutex_lock(&lport->lp_mutex);
1858
1859 switch (job->request->msgcode) {
1860 case FC_BSG_RPT_ELS:
1861 rport = job->rport;
1862 if (!rport)
1863 break;
1864
1865 rdata = rport->dd_data;
1866 rc = fc_lport_els_request(job, lport, rport->port_id,
1867 rdata->e_d_tov);
1868 break;
1869
1870 case FC_BSG_RPT_CT:
1871 rport = job->rport;
1872 if (!rport)
1873 break;
1874
1875 rdata = rport->dd_data;
1876 rc = fc_lport_ct_request(job, lport, rport->port_id,
1877 rdata->e_d_tov);
1878 break;
1879
1880 case FC_BSG_HST_CT:
1881 did = ntoh24(job->request->rqst_data.h_ct.port_id);
1882 if (did == FC_FID_DIR_SERV)
1883 rdata = lport->dns_rdata;
1884 else
1885 rdata = lport->tt.rport_lookup(lport, did);
1886
1887 if (!rdata)
1888 break;
1889
1890 rc = fc_lport_ct_request(job, lport, did, rdata->e_d_tov);
1891 break;
1892
1893 case FC_BSG_HST_ELS_NOLOGIN:
1894 did = ntoh24(job->request->rqst_data.h_els.port_id);
1895 rc = fc_lport_els_request(job, lport, did, lport->e_d_tov);
1896 break;
1897 }
1898
1899 mutex_unlock(&lport->lp_mutex);
1900 return rc;
1901 }
1902 EXPORT_SYMBOL(fc_lport_bsg_request);