1 /*
2 * Copyright(c) 2007 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20 /*
21 * PORT LOCKING NOTES
22 *
23 * These comments only apply to the 'port code' which consists of the lport,
24 * disc and rport blocks.
25 *
26 * MOTIVATION
27 *
28 * The lport, disc and rport blocks all have mutexes that are used to protect
29 * those objects. The main motivation for these locks is to prevent from
30 * having an lport reset just before we send a frame. In that scenario the
31 * lport's FID would get set to zero and then we'd send a frame with an
32 * invalid SID. We also need to ensure that states don't change unexpectedly
33 * while processing another state.
34 *
35 * HIERARCHY
36 *
37 * The following hierarchy defines the locking rules. A greater lock
38 * may be held before acquiring a lesser lock, but a lesser lock should never
39 * be held while attempting to acquire a greater lock. Here is the hierarchy:
40 *
41 * lport > disc, lport > rport, disc > rport
42 *
43 * CALLBACKS
44 *
45 * The callbacks cause complications with this scheme. There is a callback
46 * from the rport (to either lport or disc) and a callback from disc
47 * (to the lport).
48 *
49 * As rports exit the rport state machine a callback is made to the owner of
50 * the rport to notify success or failure. Since the callback is likely to
51 * cause the lport or disc to grab its lock we cannot hold the rport lock
52 * while making the callback. To ensure that the rport is not free'd while
53 * processing the callback the rport callbacks are serialized through a
54 * single-threaded workqueue. An rport would never be free'd while in a
55 * callback handler because no other rport work in this queue can be executed
56 * at the same time.
57 *
58 * When discovery succeeds or fails a callback is made to the lport as
59 * notification. Currently, successful discovery causes the lport to take no
60 * action. A failure will cause the lport to reset. There is likely a circular
61 * locking problem with this implementation.
62 */
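/*
 * A minimal sketch of the lock ordering above, assuming a caller that
 * needs both the lport and disc mutexes (as the FLOGI and dNS paths in
 * this file do).  The greater lock (lport) is taken first and released
 * last:
 *
 *	mutex_lock(&lport->lp_mutex);
 *	mutex_lock(&lport->disc.disc_mutex);
 *	... work on both objects ...
 *	mutex_unlock(&lport->disc.disc_mutex);
 *	mutex_unlock(&lport->lp_mutex);
 *
 * Acquiring lp_mutex while already holding disc_mutex would invert the
 * hierarchy and risk deadlock.
 */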
63
64 /*
65 * LPORT LOCKING
66 *
67 * The critical sections protected by the lport's mutex are quite broad and
68 * may be improved upon in the future. The lport code and its locking don't
69 * influence the I/O path, so excessive locking doesn't penalize I/O
70 * performance.
71 *
72 * The strategy is to lock whenever processing a request or response. Note
73 * that every _enter_* function corresponds to a state change. They generally
74 * change the lport's state and then send a request out on the wire. We lock
75 * before calling any of these functions to protect that state change. This
76 * means that the entry points into the lport block manage the locks so that
77 * the state machine can transition between states (i.e. _enter_* functions)
78 * while always staying protected.
79 *
80 * When handling responses we also hold the lport mutex broadly. When the
81 * lport receives the response frame it locks the mutex and then calls the
82 * appropriate handler for the particular response. Generally a response will
83 * trigger a state change and so the lock must already be held.
84 *
85 * Retries also have to consider the locking. The retries occur from a work
86 * context and the work function will lock the lport and then retry the state
87 * (i.e. _enter_* function).
88 */
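/*
 * A minimal sketch of the "lock, then run the state entry function"
 * pattern described above, in the shape used by entry points such as
 * fc_lport_reset() and the retry work function fc_lport_timeout():
 *
 *	mutex_lock(&lport->lp_mutex);
 *	fc_lport_enter_reset(lport);	(the _enter_* runs with the lock held)
 *	mutex_unlock(&lport->lp_mutex);
 *
 * The _enter_* functions never take lp_mutex themselves; they assume the
 * caller already holds it.
 */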
89
90 #include <linux/timer.h>
91 #include <linux/slab.h>
92 #include <asm/unaligned.h>
93
94 #include <scsi/fc/fc_gs.h>
95
96 #include <scsi/libfc.h>
97 #include <scsi/fc_encode.h>
98 #include <linux/scatterlist.h>
99
100 #include "fc_libfc.h"
101
102 /* Fabric IDs to use for point-to-point mode, chosen on whims. */
103 #define FC_LOCAL_PTP_FID_LO 0x010101
104 #define FC_LOCAL_PTP_FID_HI 0x010102
105
106 #define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds)*/
107
108 static void fc_lport_error(struct fc_lport *, struct fc_frame *);
109
110 static void fc_lport_enter_reset(struct fc_lport *);
111 static void fc_lport_enter_flogi(struct fc_lport *);
112 static void fc_lport_enter_dns(struct fc_lport *);
113 static void fc_lport_enter_ns(struct fc_lport *, enum fc_lport_state);
114 static void fc_lport_enter_scr(struct fc_lport *);
115 static void fc_lport_enter_ready(struct fc_lport *);
116 static void fc_lport_enter_logo(struct fc_lport *);
117
118 static const char *fc_lport_state_names[] = {
119 [LPORT_ST_DISABLED] = "disabled",
120 [LPORT_ST_FLOGI] = "FLOGI",
121 [LPORT_ST_DNS] = "dNS",
122 [LPORT_ST_RNN_ID] = "RNN_ID",
123 [LPORT_ST_RSNN_NN] = "RSNN_NN",
124 [LPORT_ST_RSPN_ID] = "RSPN_ID",
125 [LPORT_ST_RFT_ID] = "RFT_ID",
126 [LPORT_ST_RFF_ID] = "RFF_ID",
127 [LPORT_ST_SCR] = "SCR",
128 [LPORT_ST_READY] = "Ready",
129 [LPORT_ST_LOGO] = "LOGO",
130 [LPORT_ST_RESET] = "reset",
131 };
132
133 /**
134 * struct fc_bsg_info - FC Passthrough management structure
135 * @job: The passthrough job
136 * @lport: The local port to pass through a command
137 * @rsp_code: The expected response code
138 * @sg: job->reply_payload.sg_list
139 * @nents: job->reply_payload.sg_cnt
140 * @offset: The offset into the response data
141 */
142 struct fc_bsg_info {
143 struct fc_bsg_job *job;
144 struct fc_lport *lport;
145 u16 rsp_code;
146 struct scatterlist *sg;
147 u32 nents;
148 size_t offset;
149 };
150
151 /**
152 * fc_frame_drop() - Dummy frame handler
153 * @lport: The local port the frame was received on
154 * @fp: The received frame
155 */
156 static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
157 {
158 fc_frame_free(fp);
159 return 0;
160 }
161
162 /**
163 * fc_lport_rport_callback() - Event handler for rport events
164 * @lport: The lport which is receiving the event
165 * @rdata: private remote port data
166 * @event: The event that occurred
167 *
168 * Locking Note: The rport lock should not be held when calling
169 * this function.
170 */
171 static void fc_lport_rport_callback(struct fc_lport *lport,
172 struct fc_rport_priv *rdata,
173 enum fc_rport_event event)
174 {
175 FC_LPORT_DBG(lport, "Received a %d event for port (%6.6x)\n", event,
176 rdata->ids.port_id);
177
178 mutex_lock(&lport->lp_mutex);
179 switch (event) {
180 case RPORT_EV_READY:
181 if (lport->state == LPORT_ST_DNS) {
182 lport->dns_rdata = rdata;
183 fc_lport_enter_ns(lport, LPORT_ST_RNN_ID);
184 } else {
185 FC_LPORT_DBG(lport, "Received a READY event "
186 "on port (%6.6x) for the directory "
187 "server, but the lport is not "
188 "in the DNS state, it's in the "
189 "%d state", rdata->ids.port_id,
190 lport->state);
191 lport->tt.rport_logoff(rdata);
192 }
193 break;
194 case RPORT_EV_LOGO:
195 case RPORT_EV_FAILED:
196 case RPORT_EV_STOP:
197 lport->dns_rdata = NULL;
198 break;
199 case RPORT_EV_NONE:
200 break;
201 }
202 mutex_unlock(&lport->lp_mutex);
203 }
204
205 /**
206 * fc_lport_state() - Return a string which represents the lport's state
207 * @lport: The lport whose state is to be converted to a string
208 */
209 static const char *fc_lport_state(struct fc_lport *lport)
210 {
211 const char *cp;
212
213 cp = fc_lport_state_names[lport->state];
214 if (!cp)
215 cp = "unknown";
216 return cp;
217 }
218
219 /**
220 * fc_lport_ptp_setup() - Create an rport for point-to-point mode
221 * @lport: The lport to attach the ptp rport to
222 * @remote_fid: The FID of the ptp rport
223 * @remote_wwpn: The WWPN of the ptp rport
224 * @remote_wwnn: The WWNN of the ptp rport
225 */
226 static void fc_lport_ptp_setup(struct fc_lport *lport,
227 u32 remote_fid, u64 remote_wwpn,
228 u64 remote_wwnn)
229 {
230 mutex_lock(&lport->disc.disc_mutex);
231 if (lport->ptp_rdata) {
232 lport->tt.rport_logoff(lport->ptp_rdata);
233 kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
234 }
235 lport->ptp_rdata = lport->tt.rport_create(lport, remote_fid);
236 kref_get(&lport->ptp_rdata->kref);
237 lport->ptp_rdata->ids.port_name = remote_wwpn;
238 lport->ptp_rdata->ids.node_name = remote_wwnn;
239 mutex_unlock(&lport->disc.disc_mutex);
240
241 lport->tt.rport_login(lport->ptp_rdata);
242
243 fc_lport_enter_ready(lport);
244 }
245
246 /**
247 * fc_get_host_port_state() - Return the port state of the given Scsi_Host
248 * @shost: The SCSI host whose port state is to be determined
249 */
250 void fc_get_host_port_state(struct Scsi_Host *shost)
251 {
252 struct fc_lport *lport = shost_priv(shost);
253
254 mutex_lock(&lport->lp_mutex);
255 if (!lport->link_up)
256 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
257 else
258 switch (lport->state) {
259 case LPORT_ST_READY:
260 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
261 break;
262 default:
263 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
264 }
265 mutex_unlock(&lport->lp_mutex);
266 }
267 EXPORT_SYMBOL(fc_get_host_port_state);
268
269 /**
270 * fc_get_host_speed() - Return the speed of the given Scsi_Host
271 * @shost: The SCSI host whose port speed is to be determined
272 */
273 void fc_get_host_speed(struct Scsi_Host *shost)
274 {
275 struct fc_lport *lport = shost_priv(shost);
276
277 fc_host_speed(shost) = lport->link_speed;
278 }
279 EXPORT_SYMBOL(fc_get_host_speed);
280
281 /**
282 * fc_get_host_stats() - Return the Scsi_Host's statistics
283 * @shost: The SCSI host whose statistics are to be returned
284 */
285 struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
286 {
287 struct fc_host_statistics *fcoe_stats;
288 struct fc_lport *lport = shost_priv(shost);
289 struct timespec v0, v1;
290 unsigned int cpu;
291
292 fcoe_stats = &lport->host_stats;
293 memset(fcoe_stats, 0, sizeof(struct fc_host_statistics));
294
295 jiffies_to_timespec(jiffies, &v0);
296 jiffies_to_timespec(lport->boot_time, &v1);
297 fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);
298
299 for_each_possible_cpu(cpu) {
300 struct fcoe_dev_stats *stats;
301
302 stats = per_cpu_ptr(lport->dev_stats, cpu);
303
304 fcoe_stats->tx_frames += stats->TxFrames;
305 fcoe_stats->tx_words += stats->TxWords;
306 fcoe_stats->rx_frames += stats->RxFrames;
307 fcoe_stats->rx_words += stats->RxWords;
308 fcoe_stats->error_frames += stats->ErrorFrames;
309 fcoe_stats->invalid_crc_count += stats->InvalidCRCCount;
310 fcoe_stats->fcp_input_requests += stats->InputRequests;
311 fcoe_stats->fcp_output_requests += stats->OutputRequests;
312 fcoe_stats->fcp_control_requests += stats->ControlRequests;
313 fcoe_stats->fcp_input_megabytes += stats->InputMegabytes;
314 fcoe_stats->fcp_output_megabytes += stats->OutputMegabytes;
315 fcoe_stats->link_failure_count += stats->LinkFailureCount;
316 }
317 fcoe_stats->lip_count = -1;
318 fcoe_stats->nos_count = -1;
319 fcoe_stats->loss_of_sync_count = -1;
320 fcoe_stats->loss_of_signal_count = -1;
321 fcoe_stats->prim_seq_protocol_err_count = -1;
322 fcoe_stats->dumped_frames = -1;
323 return fcoe_stats;
324 }
325 EXPORT_SYMBOL(fc_get_host_stats);
326
327 /**
328 * fc_lport_flogi_fill() - Fill in FLOGI command for request
329 * @lport: The local port the FLOGI is for
330 * @flogi: The FLOGI command
331 * @op: The opcode
332 */
333 static void fc_lport_flogi_fill(struct fc_lport *lport,
334 struct fc_els_flogi *flogi,
335 unsigned int op)
336 {
337 struct fc_els_csp *sp;
338 struct fc_els_cssp *cp;
339
340 memset(flogi, 0, sizeof(*flogi));
341 flogi->fl_cmd = (u8) op;
342 put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
343 put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
344 sp = &flogi->fl_csp;
345 sp->sp_hi_ver = 0x20;
346 sp->sp_lo_ver = 0x20;
347 sp->sp_bb_cred = htons(10); /* this gets set by gateway */
348 sp->sp_bb_data = htons((u16) lport->mfs);
349 cp = &flogi->fl_cssp[3 - 1]; /* class 3 parameters */
350 cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
351 if (op != ELS_FLOGI) {
352 sp->sp_features = htons(FC_SP_FT_CIRO);
353 sp->sp_tot_seq = htons(255); /* seq. we accept */
354 sp->sp_rel_off = htons(0x1f);
355 sp->sp_e_d_tov = htonl(lport->e_d_tov);
356
357 cp->cp_rdfs = htons((u16) lport->mfs);
358 cp->cp_con_seq = htons(255);
359 cp->cp_open_seq = 1;
360 }
361 }
362
363 /**
364 * fc_lport_add_fc4_type() - Add a supported FC-4 type to a local port
365 * @lport: The local port to add a new FC-4 type to
366 * @type: The new FC-4 type
367 */
368 static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
369 {
370 __be32 *mp;
371
372 mp = &lport->fcts.ff_type_map[type / FC_NS_BPW];
373 *mp = htonl(ntohl(*mp) | 1UL << (type % FC_NS_BPW));
374 }
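/*
 * Worked example for fc_lport_add_fc4_type(): with FC_NS_BPW bits per map
 * word, FC_TYPE_FCP (0x08) sets bit 8 of ff_type_map word 0 and
 * FC_TYPE_CT (0x20) sets bit 0 of word 1, matching the registrations
 * made in fc_lport_config().
 */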
375
376 /**
377 * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report.
378 * @lport: Fibre Channel local port receiving the RLIR
379 * @fp: The RLIR request frame
380 *
381 * Locking Note: The lport lock is expected to be held before calling
382 * this function.
383 */
384 static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp)
385 {
386 FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
387 fc_lport_state(lport));
388
389 lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
390 fc_frame_free(fp);
391 }
392
393 /**
394 * fc_lport_recv_echo_req() - Handle received ECHO request
395 * @lport: The local port receiving the ECHO
396 * @in_fp: ECHO request frame
397 *
398 * Locking Note: The lport lock is expected to be held before calling
399 * this function.
400 */
401 static void fc_lport_recv_echo_req(struct fc_lport *lport,
402 struct fc_frame *in_fp)
403 {
404 struct fc_frame *fp;
405 unsigned int len;
406 void *pp;
407 void *dp;
408
409 FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
410 fc_lport_state(lport));
411
412 len = fr_len(in_fp) - sizeof(struct fc_frame_header);
413 pp = fc_frame_payload_get(in_fp, len);
414
415 if (len < sizeof(__be32))
416 len = sizeof(__be32);
417
418 fp = fc_frame_alloc(lport, len);
419 if (fp) {
420 dp = fc_frame_payload_get(fp, len);
421 memcpy(dp, pp, len);
422 *((__be32 *)dp) = htonl(ELS_LS_ACC << 24);
423 fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
424 lport->tt.frame_send(lport, fp);
425 }
426 fc_frame_free(in_fp);
427 }
428
429 /**
430 * fc_lport_recv_rnid_req() - Handle received Request Node ID data request
431 * @lport: The local port receiving the RNID
432 * @in_fp: The RNID request frame
433 *
434 * Locking Note: The lport lock is expected to be held before calling
435 * this function.
436 */
437 static void fc_lport_recv_rnid_req(struct fc_lport *lport,
438 struct fc_frame *in_fp)
439 {
440 struct fc_frame *fp;
441 struct fc_els_rnid *req;
442 struct {
443 struct fc_els_rnid_resp rnid;
444 struct fc_els_rnid_cid cid;
445 struct fc_els_rnid_gen gen;
446 } *rp;
447 struct fc_seq_els_data rjt_data;
448 u8 fmt;
449 size_t len;
450
451 FC_LPORT_DBG(lport, "Received RNID request while in state %s\n",
452 fc_lport_state(lport));
453
454 req = fc_frame_payload_get(in_fp, sizeof(*req));
455 if (!req) {
456 rjt_data.reason = ELS_RJT_LOGIC;
457 rjt_data.explan = ELS_EXPL_NONE;
458 lport->tt.seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
459 } else {
460 fmt = req->rnid_fmt;
461 len = sizeof(*rp);
462 if (fmt != ELS_RNIDF_GEN ||
463 ntohl(lport->rnid_gen.rnid_atype) == 0) {
464 fmt = ELS_RNIDF_NONE; /* nothing to provide */
465 len -= sizeof(rp->gen);
466 }
467 fp = fc_frame_alloc(lport, len);
468 if (fp) {
469 rp = fc_frame_payload_get(fp, len);
470 memset(rp, 0, len);
471 rp->rnid.rnid_cmd = ELS_LS_ACC;
472 rp->rnid.rnid_fmt = fmt;
473 rp->rnid.rnid_cid_len = sizeof(rp->cid);
474 rp->cid.rnid_wwpn = htonll(lport->wwpn);
475 rp->cid.rnid_wwnn = htonll(lport->wwnn);
476 if (fmt == ELS_RNIDF_GEN) {
477 rp->rnid.rnid_sid_len = sizeof(rp->gen);
478 memcpy(&rp->gen, &lport->rnid_gen,
479 sizeof(rp->gen));
480 }
481 fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
482 lport->tt.frame_send(lport, fp);
483 }
484 }
485 fc_frame_free(in_fp);
486 }
487
488 /**
489 * fc_lport_recv_logo_req() - Handle received fabric LOGO request
490 * @lport: The local port receiving the LOGO
491 * @fp: The LOGO request frame
492 *
493 * Locking Note: The lport lock is expected to be held before calling
494 * this function.
495 */
496 static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
497 {
498 lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
499 fc_lport_enter_reset(lport);
500 fc_frame_free(fp);
501 }
502
503 /**
504 * fc_fabric_login() - Start the lport state machine
505 * @lport: The local port that should log into the fabric
506 *
507 * Locking Note: This function should not be called
508 * with the lport lock held.
509 */
510 int fc_fabric_login(struct fc_lport *lport)
511 {
512 int rc = -1;
513
514 mutex_lock(&lport->lp_mutex);
515 if (lport->state == LPORT_ST_DISABLED ||
516 lport->state == LPORT_ST_LOGO) {
517 fc_lport_state_enter(lport, LPORT_ST_RESET);
518 fc_lport_enter_reset(lport);
519 rc = 0;
520 }
521 mutex_unlock(&lport->lp_mutex);
522
523 return rc;
524 }
525 EXPORT_SYMBOL(fc_fabric_login);
526
527 /**
528 * __fc_linkup() - Handler for transport linkup events
529 * @lport: The lport whose link is up
530 *
531 * Locking: must be called with the lp_mutex held
532 */
533 void __fc_linkup(struct fc_lport *lport)
534 {
535 if (!lport->link_up) {
536 lport->link_up = 1;
537
538 if (lport->state == LPORT_ST_RESET)
539 fc_lport_enter_flogi(lport);
540 }
541 }
542
543 /**
544 * fc_linkup() - Handler for transport linkup events
545 * @lport: The local port whose link is up
546 */
547 void fc_linkup(struct fc_lport *lport)
548 {
549 printk(KERN_INFO "host%d: libfc: Link up on port (%6.6x)\n",
550 lport->host->host_no, lport->port_id);
551
552 mutex_lock(&lport->lp_mutex);
553 __fc_linkup(lport);
554 mutex_unlock(&lport->lp_mutex);
555 }
556 EXPORT_SYMBOL(fc_linkup);
557
558 /**
559 * __fc_linkdown() - Handler for transport linkdown events
560 * @lport: The lport whose link is down
561 *
562 * Locking: must be called with the lp_mutex held
563 */
564 void __fc_linkdown(struct fc_lport *lport)
565 {
566 if (lport->link_up) {
567 lport->link_up = 0;
568 fc_lport_enter_reset(lport);
569 lport->tt.fcp_cleanup(lport);
570 }
571 }
572
573 /**
574 * fc_linkdown() - Handler for transport linkdown events
575 * @lport: The local port whose link is down
576 */
577 void fc_linkdown(struct fc_lport *lport)
578 {
579 printk(KERN_INFO "host%d: libfc: Link down on port (%6.6x)\n",
580 lport->host->host_no, lport->port_id);
581
582 mutex_lock(&lport->lp_mutex);
583 __fc_linkdown(lport);
584 mutex_unlock(&lport->lp_mutex);
585 }
586 EXPORT_SYMBOL(fc_linkdown);
587
588 /**
589 * fc_fabric_logoff() - Logout of the fabric
590 * @lport: The local port to logoff the fabric
591 *
592 * Return value:
593 * 0 for success, -1 for failure
594 */
595 int fc_fabric_logoff(struct fc_lport *lport)
596 {
597 lport->tt.disc_stop_final(lport);
598 mutex_lock(&lport->lp_mutex);
599 if (lport->dns_rdata)
600 lport->tt.rport_logoff(lport->dns_rdata);
601 mutex_unlock(&lport->lp_mutex);
602 lport->tt.rport_flush_queue();
603 mutex_lock(&lport->lp_mutex);
604 fc_lport_enter_logo(lport);
605 mutex_unlock(&lport->lp_mutex);
606 cancel_delayed_work_sync(&lport->retry_work);
607 return 0;
608 }
609 EXPORT_SYMBOL(fc_fabric_logoff);
610
611 /**
612 * fc_lport_destroy() - Unregister a fc_lport
613 * @lport: The local port to unregister
614 *
615 * Note:
616 * This is the exit routine for an fc_lport instance. It cleans up all
617 * allocated memory and frees up other system resources.
619 *
620 */
621 int fc_lport_destroy(struct fc_lport *lport)
622 {
623 mutex_lock(&lport->lp_mutex);
624 lport->state = LPORT_ST_DISABLED;
625 lport->link_up = 0;
626 lport->tt.frame_send = fc_frame_drop;
627 mutex_unlock(&lport->lp_mutex);
628
629 lport->tt.fcp_abort_io(lport);
630 lport->tt.disc_stop_final(lport);
631 lport->tt.exch_mgr_reset(lport, 0, 0);
632 return 0;
633 }
634 EXPORT_SYMBOL(fc_lport_destroy);
635
636 /**
637 * fc_set_mfs() - Set the maximum frame size for a local port
638 * @lport: The local port to set the MFS for
639 * @mfs: The new MFS
640 */
641 int fc_set_mfs(struct fc_lport *lport, u32 mfs)
642 {
643 unsigned int old_mfs;
644 int rc = -EINVAL;
645
646 mutex_lock(&lport->lp_mutex);
647
648 old_mfs = lport->mfs;
649
650 if (mfs >= FC_MIN_MAX_FRAME) {
651 mfs &= ~3;
652 if (mfs > FC_MAX_FRAME)
653 mfs = FC_MAX_FRAME;
654 mfs -= sizeof(struct fc_frame_header);
655 lport->mfs = mfs;
656 rc = 0;
657 }
658
659 if (!rc && mfs < old_mfs)
660 fc_lport_enter_reset(lport);
661
662 mutex_unlock(&lport->lp_mutex);
663
664 return rc;
665 }
666 EXPORT_SYMBOL(fc_set_mfs);
667
668 /**
669 * fc_lport_disc_callback() - Callback for discovery events
670 * @lport: The local port receiving the event
671 * @event: The discovery event
672 */
673 void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
674 {
675 switch (event) {
676 case DISC_EV_SUCCESS:
677 FC_LPORT_DBG(lport, "Discovery succeeded\n");
678 break;
679 case DISC_EV_FAILED:
680 printk(KERN_ERR "host%d: libfc: "
681 "Discovery failed for port (%6.6x)\n",
682 lport->host->host_no, lport->port_id);
683 mutex_lock(&lport->lp_mutex);
684 fc_lport_enter_reset(lport);
685 mutex_unlock(&lport->lp_mutex);
686 break;
687 case DISC_EV_NONE:
688 WARN_ON(1);
689 break;
690 }
691 }
692
693 /**
694 * fc_lport_enter_ready() - Enter the ready state and start discovery
695 * @lport: The local port that is ready
696 *
697 * Locking Note: The lport lock is expected to be held before calling
698 * this routine.
699 */
700 static void fc_lport_enter_ready(struct fc_lport *lport)
701 {
702 FC_LPORT_DBG(lport, "Entered READY from state %s\n",
703 fc_lport_state(lport));
704
705 fc_lport_state_enter(lport, LPORT_ST_READY);
706 if (lport->vport)
707 fc_vport_set_state(lport->vport, FC_VPORT_ACTIVE);
708 fc_vports_linkchange(lport);
709
710 if (!lport->ptp_rdata)
711 lport->tt.disc_start(fc_lport_disc_callback, lport);
712 }
713
714 /**
715 * fc_lport_set_port_id() - set the local port Port ID
716 * @lport: The local port which will have its Port ID set.
717 * @port_id: The new port ID.
718 * @fp: The frame containing the incoming request, or NULL.
719 *
720 * Locking Note: The lport lock is expected to be held before calling
721 * this function.
722 */
723 static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id,
724 struct fc_frame *fp)
725 {
726 if (port_id)
727 printk(KERN_INFO "host%d: Assigned Port ID %6.6x\n",
728 lport->host->host_no, port_id);
729
730 lport->port_id = port_id;
731
732 /* Update the fc_host */
733 fc_host_port_id(lport->host) = port_id;
734
735 if (lport->tt.lport_set_port_id)
736 lport->tt.lport_set_port_id(lport, port_id, fp);
737 }
738
739 /**
740 * fc_lport_set_local_id() - set the local port Port ID for point-to-multipoint
741 * @lport: The local port which will have its Port ID set.
742 * @port_id: The new port ID.
743 *
744 * Called by the lower-level driver when transport sets the local port_id.
745 * This is used in VN_port to VN_port mode for FCoE, and causes FLOGI and
746 * discovery to be skipped.
747 */
748 void fc_lport_set_local_id(struct fc_lport *lport, u32 port_id)
749 {
750 mutex_lock(&lport->lp_mutex);
751
752 fc_lport_set_port_id(lport, port_id, NULL);
753
754 switch (lport->state) {
755 case LPORT_ST_RESET:
756 case LPORT_ST_FLOGI:
757 if (port_id)
758 fc_lport_enter_ready(lport);
759 break;
760 default:
761 break;
762 }
763 mutex_unlock(&lport->lp_mutex);
764 }
765 EXPORT_SYMBOL(fc_lport_set_local_id);
766
767 /**
768 * fc_lport_recv_flogi_req() - Receive a FLOGI request
769 * @lport: The local port that received the request
770 * @rx_fp: The FLOGI frame
771 *
772 * A received FLOGI request indicates a point-to-point connection.
773 * Accept it with the common service parameters indicating our N port.
774 * Set up to do a PLOGI if we have the higher-number WWPN.
775 *
776 * Locking Note: The lport lock is expected to be held before calling
777 * this function.
778 */
779 static void fc_lport_recv_flogi_req(struct fc_lport *lport,
780 struct fc_frame *rx_fp)
781 {
782 struct fc_frame *fp;
783 struct fc_frame_header *fh;
784 struct fc_els_flogi *flp;
785 struct fc_els_flogi *new_flp;
786 u64 remote_wwpn;
787 u32 remote_fid;
788 u32 local_fid;
789
790 FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n",
791 fc_lport_state(lport));
792
793 remote_fid = fc_frame_sid(rx_fp);
794 flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
795 if (!flp)
796 goto out;
797 remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
798 if (remote_wwpn == lport->wwpn) {
799 printk(KERN_WARNING "host%d: libfc: Received FLOGI from port "
800 "with same WWPN %16.16llx\n",
801 lport->host->host_no, remote_wwpn);
802 goto out;
803 }
804 FC_LPORT_DBG(lport, "FLOGI from port WWPN %16.16llx\n", remote_wwpn);
805
806 /*
807 * XXX what is the right thing to do for FIDs?
808 * The originator might expect our S_ID to be 0xfffffe.
809 * But if so, both of us could end up with the same FID.
810 */
811 local_fid = FC_LOCAL_PTP_FID_LO;
812 if (remote_wwpn < lport->wwpn) {
813 local_fid = FC_LOCAL_PTP_FID_HI;
814 if (!remote_fid || remote_fid == local_fid)
815 remote_fid = FC_LOCAL_PTP_FID_LO;
816 } else if (!remote_fid) {
817 remote_fid = FC_LOCAL_PTP_FID_HI;
818 }
819
820 fc_lport_set_port_id(lport, local_fid, rx_fp);
821
822 fp = fc_frame_alloc(lport, sizeof(*flp));
823 if (fp) {
824 new_flp = fc_frame_payload_get(fp, sizeof(*flp));
825 fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI);
826 new_flp->fl_cmd = (u8) ELS_LS_ACC;
827
828 /*
829 * Send the response. If this fails, the originator should
830 * repeat the sequence.
831 */
832 fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
833 fh = fc_frame_header_get(fp);
834 hton24(fh->fh_s_id, local_fid);
835 hton24(fh->fh_d_id, remote_fid);
836 lport->tt.frame_send(lport, fp);
837
838 } else {
839 fc_lport_error(lport, fp);
840 }
841 fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
842 get_unaligned_be64(&flp->fl_wwnn));
843 out:
844 fc_frame_free(rx_fp);
845 }
846
847 /**
848 * fc_lport_recv_req() - The generic lport request handler
849 * @lport: The local port that received the request
850 * @fp: The request frame
851 *
852 * This function will see if the lport handles the request or
853 * if an rport should handle the request.
854 *
855 * Locking Note: This function should not be called with the lport
856 * lock held because it will grab the lock.
857 */
858 static void fc_lport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
859 {
860 struct fc_frame_header *fh = fc_frame_header_get(fp);
861 void (*recv)(struct fc_lport *, struct fc_frame *);
862
863 mutex_lock(&lport->lp_mutex);
864
865 /*
866 * Handle special ELS cases like FLOGI, LOGO, and
867 * RSCN here. These don't require a session.
868 * Even if we had a session, it might not be ready.
869 */
870 if (!lport->link_up)
871 fc_frame_free(fp);
872 else if (fh->fh_type == FC_TYPE_ELS &&
873 fh->fh_r_ctl == FC_RCTL_ELS_REQ) {
874 /*
875 * Check opcode.
876 */
877 recv = lport->tt.rport_recv_req;
878 switch (fc_frame_payload_op(fp)) {
879 case ELS_FLOGI:
880 if (!lport->point_to_multipoint)
881 recv = fc_lport_recv_flogi_req;
882 break;
883 case ELS_LOGO:
884 if (fc_frame_sid(fp) == FC_FID_FLOGI)
885 recv = fc_lport_recv_logo_req;
886 break;
887 case ELS_RSCN:
888 recv = lport->tt.disc_recv_req;
889 break;
890 case ELS_ECHO:
891 recv = fc_lport_recv_echo_req;
892 break;
893 case ELS_RLIR:
894 recv = fc_lport_recv_rlir_req;
895 break;
896 case ELS_RNID:
897 recv = fc_lport_recv_rnid_req;
898 break;
899 }
900
901 recv(lport, fp);
902 } else {
903 FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n",
904 fr_eof(fp));
905 fc_frame_free(fp);
906 }
907 mutex_unlock(&lport->lp_mutex);
908 }
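/*
 * In summary, fc_lport_recv_req() keeps only the ELS requests that must
 * be handled before a session exists: FLOGI (unless in point-to-multipoint
 * mode), LOGO sent by the fabric FLOGI server, RSCN (passed to the
 * discovery code), ECHO, RLIR and RNID.  Everything else is handed to
 * lport->tt.rport_recv_req().
 */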
909
910 /**
911 * fc_lport_reset() - Reset a local port
912 * @lport: The local port which should be reset
913 *
914 * Locking Note: This function should not be called with the
915 * lport lock held.
916 */
917 int fc_lport_reset(struct fc_lport *lport)
918 {
919 cancel_delayed_work_sync(&lport->retry_work);
920 mutex_lock(&lport->lp_mutex);
921 fc_lport_enter_reset(lport);
922 mutex_unlock(&lport->lp_mutex);
923 return 0;
924 }
925 EXPORT_SYMBOL(fc_lport_reset);
926
927 /**
928 * fc_lport_reset_locked() - Reset the local port w/ the lport lock held
929 * @lport: The local port to be reset
930 *
931 * Locking Note: The lport lock is expected to be held before calling
932 * this routine.
933 */
934 static void fc_lport_reset_locked(struct fc_lport *lport)
935 {
936 if (lport->dns_rdata)
937 lport->tt.rport_logoff(lport->dns_rdata);
938
939 if (lport->ptp_rdata) {
940 lport->tt.rport_logoff(lport->ptp_rdata);
941 kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
942 lport->ptp_rdata = NULL;
943 }
944
945 lport->tt.disc_stop(lport);
946
947 lport->tt.exch_mgr_reset(lport, 0, 0);
948 fc_host_fabric_name(lport->host) = 0;
949
950 if (lport->port_id && (!lport->point_to_multipoint || !lport->link_up))
951 fc_lport_set_port_id(lport, 0, NULL);
952 }
953
954 /**
955 * fc_lport_enter_reset() - Reset the local port
956 * @lport: The local port to be reset
957 *
958 * Locking Note: The lport lock is expected to be held before calling
959 * this routine.
960 */
961 static void fc_lport_enter_reset(struct fc_lport *lport)
962 {
963 FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
964 fc_lport_state(lport));
965
966 if (lport->state == LPORT_ST_DISABLED || lport->state == LPORT_ST_LOGO)
967 return;
968
969 if (lport->vport) {
970 if (lport->link_up)
971 fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING);
972 else
973 fc_vport_set_state(lport->vport, FC_VPORT_LINKDOWN);
974 }
975 fc_lport_state_enter(lport, LPORT_ST_RESET);
976 fc_vports_linkchange(lport);
977 fc_lport_reset_locked(lport);
978 if (lport->link_up)
979 fc_lport_enter_flogi(lport);
980 }
981
982 /**
983 * fc_lport_enter_disabled() - Disable the local port
984 * @lport: The local port to be reset
985 *
986 * Locking Note: The lport lock is expected to be held before calling
987 * this routine.
988 */
989 static void fc_lport_enter_disabled(struct fc_lport *lport)
990 {
991 FC_LPORT_DBG(lport, "Entered disabled state from %s state\n",
992 fc_lport_state(lport));
993
994 fc_lport_state_enter(lport, LPORT_ST_DISABLED);
995 fc_vports_linkchange(lport);
996 fc_lport_reset_locked(lport);
997 }
998
999 /**
1000 * fc_lport_error() - Handler for any errors
1001 * @lport: The local port that the error was on
1002 * @fp: The error code encoded in a frame pointer
1003 *
1004 * If the error was caused by a resource allocation failure
1005 * then wait for half a second and retry, otherwise retry
1006 * after the e_d_tov time.
1007 */
1008 static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
1009 {
1010 unsigned long delay = 0;
1011 FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n",
1012 PTR_ERR(fp), fc_lport_state(lport),
1013 lport->retry_count);
1014
1015 if (PTR_ERR(fp) == -FC_EX_CLOSED)
1016 return;
1017
1018 /*
1019 * Memory allocation failure, or the exchange timed out
1020 * or we received LS_RJT.
1021 * Retry after delay
1022 */
1023 if (lport->retry_count < lport->max_retry_count) {
1024 lport->retry_count++;
1025 if (!fp)
1026 delay = msecs_to_jiffies(500);
1027 else
1028 delay = msecs_to_jiffies(lport->e_d_tov);
1029
1030 schedule_delayed_work(&lport->retry_work, delay);
1031 } else
1032 fc_lport_enter_reset(lport);
1033 }
1034
1035 /**
1036 * fc_lport_ns_resp() - Handle response to a name server
1037 * registration exchange
1038 * @sp: current sequence in exchange
1039 * @fp: response frame
1040 * @lp_arg: Fibre Channel host port instance
1041 *
1042 * Locking Note: This function will be called without the lport lock
1043 * held, but it will lock, call an _enter_* function or fc_lport_error()
1044 * and then unlock the lport.
1045 */
1046 static void fc_lport_ns_resp(struct fc_seq *sp, struct fc_frame *fp,
1047 void *lp_arg)
1048 {
1049 struct fc_lport *lport = lp_arg;
1050 struct fc_frame_header *fh;
1051 struct fc_ct_hdr *ct;
1052
1053 FC_LPORT_DBG(lport, "Received a ns %s\n", fc_els_resp_type(fp));
1054
1055 if (fp == ERR_PTR(-FC_EX_CLOSED))
1056 return;
1057
1058 mutex_lock(&lport->lp_mutex);
1059
1060 if (lport->state < LPORT_ST_RNN_ID || lport->state > LPORT_ST_RFF_ID) {
1061 FC_LPORT_DBG(lport, "Received a name server response, "
1062 "but in state %s\n", fc_lport_state(lport));
1063 if (IS_ERR(fp))
1064 goto err;
1065 goto out;
1066 }
1067
1068 if (IS_ERR(fp)) {
1069 fc_lport_error(lport, fp);
1070 goto err;
1071 }
1072
1073 fh = fc_frame_header_get(fp);
1074 ct = fc_frame_payload_get(fp, sizeof(*ct));
1075
1076 if (fh && ct && fh->fh_type == FC_TYPE_CT &&
1077 ct->ct_fs_type == FC_FST_DIR &&
1078 ct->ct_fs_subtype == FC_NS_SUBTYPE &&
1079 ntohs(ct->ct_cmd) == FC_FS_ACC)
1080 switch (lport->state) {
1081 case LPORT_ST_RNN_ID:
1082 fc_lport_enter_ns(lport, LPORT_ST_RSNN_NN);
1083 break;
1084 case LPORT_ST_RSNN_NN:
1085 fc_lport_enter_ns(lport, LPORT_ST_RSPN_ID);
1086 break;
1087 case LPORT_ST_RSPN_ID:
1088 fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
1089 break;
1090 case LPORT_ST_RFT_ID:
1091 fc_lport_enter_ns(lport, LPORT_ST_RFF_ID);
1092 break;
1093 case LPORT_ST_RFF_ID:
1094 fc_lport_enter_scr(lport);
1095 break;
1096 default:
1097 /* should have already been caught by state checks */
1098 break;
1099 }
1100 else
1101 fc_lport_error(lport, fp);
1102 out:
1103 fc_frame_free(fp);
1104 err:
1105 mutex_unlock(&lport->lp_mutex);
1106 }
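/*
 * The name server registration walk driven by fc_lport_ns_resp() above:
 *
 *	FLOGI -> dNS login -> RNN_ID -> RSNN_NN -> RSPN_ID -> RFT_ID
 *	      -> RFF_ID -> SCR -> READY
 *
 * RSNN_NN and RSPN_ID are skipped by fc_lport_enter_ns() when the host
 * has no symbolic name.
 */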
1107
1108 /**
1109 * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request
1110 * @sp: current sequence in SCR exchange
1111 * @fp: response frame
1112 * @lp_arg: Fibre Channel local port instance that sent the registration request
1113 *
1114 * Locking Note: This function will be called without the lport lock
1115 * held, but it will lock, call an _enter_* function or fc_lport_error
1116 * and then unlock the lport.
1117 */
1118 static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
1119 void *lp_arg)
1120 {
1121 struct fc_lport *lport = lp_arg;
1122 u8 op;
1123
1124 FC_LPORT_DBG(lport, "Received a SCR %s\n", fc_els_resp_type(fp));
1125
1126 if (fp == ERR_PTR(-FC_EX_CLOSED))
1127 return;
1128
1129 mutex_lock(&lport->lp_mutex);
1130
1131 if (lport->state != LPORT_ST_SCR) {
1132 FC_LPORT_DBG(lport, "Received a SCR response, but in state "
1133 "%s\n", fc_lport_state(lport));
1134 if (IS_ERR(fp))
1135 goto err;
1136 goto out;
1137 }
1138
1139 if (IS_ERR(fp)) {
1140 fc_lport_error(lport, fp);
1141 goto err;
1142 }
1143
1144 op = fc_frame_payload_op(fp);
1145 if (op == ELS_LS_ACC)
1146 fc_lport_enter_ready(lport);
1147 else
1148 fc_lport_error(lport, fp);
1149
1150 out:
1151 fc_frame_free(fp);
1152 err:
1153 mutex_unlock(&lport->lp_mutex);
1154 }
1155
1156 /**
1157 * fc_lport_enter_scr() - Send a SCR (State Change Register) request
1158 * @lport: The local port to register for state changes
1159 *
1160 * Locking Note: The lport lock is expected to be held before calling
1161 * this routine.
1162 */
1163 static void fc_lport_enter_scr(struct fc_lport *lport)
1164 {
1165 struct fc_frame *fp;
1166
1167 FC_LPORT_DBG(lport, "Entered SCR state from %s state\n",
1168 fc_lport_state(lport));
1169
1170 fc_lport_state_enter(lport, LPORT_ST_SCR);
1171
1172 fp = fc_frame_alloc(lport, sizeof(struct fc_els_scr));
1173 if (!fp) {
1174 fc_lport_error(lport, fp);
1175 return;
1176 }
1177
1178 if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR,
1179 fc_lport_scr_resp, lport,
1180 2 * lport->r_a_tov))
1181 fc_lport_error(lport, NULL);
1182 }
1183
1184 /**
1185 * fc_lport_enter_ns() - register some object with the name server
1186 * @lport: Fibre Channel local port to register
 * @state: The name server registration state to enter
1187 *
1188 * Locking Note: The lport lock is expected to be held before calling
1189 * this routine.
1190 */
1191 static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state)
1192 {
1193 struct fc_frame *fp;
1194 enum fc_ns_req cmd;
1195 int size = sizeof(struct fc_ct_hdr);
1196 size_t len;
1197
1198 FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
1199 fc_lport_state_names[state],
1200 fc_lport_state(lport));
1201
1202 fc_lport_state_enter(lport, state);
1203
1204 switch (state) {
1205 case LPORT_ST_RNN_ID:
1206 cmd = FC_NS_RNN_ID;
1207 size += sizeof(struct fc_ns_rn_id);
1208 break;
1209 case LPORT_ST_RSNN_NN:
1210 len = strnlen(fc_host_symbolic_name(lport->host), 255);
1211 /* if there is no symbolic name, skip to RFT_ID */
1212 if (!len)
1213 return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
1214 cmd = FC_NS_RSNN_NN;
1215 size += sizeof(struct fc_ns_rsnn) + len;
1216 break;
1217 case LPORT_ST_RSPN_ID:
1218 len = strnlen(fc_host_symbolic_name(lport->host), 255);
1219 /* if there is no symbolic name, skip to RFT_ID */
1220 if (!len)
1221 return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
1222 cmd = FC_NS_RSPN_ID;
1223 size += sizeof(struct fc_ns_rspn) + len;
1224 break;
1225 case LPORT_ST_RFT_ID:
1226 cmd = FC_NS_RFT_ID;
1227 size += sizeof(struct fc_ns_rft);
1228 break;
1229 case LPORT_ST_RFF_ID:
1230 cmd = FC_NS_RFF_ID;
1231 size += sizeof(struct fc_ns_rff_id);
1232 break;
1233 default:
1234 fc_lport_error(lport, NULL);
1235 return;
1236 }
1237
1238 fp = fc_frame_alloc(lport, size);
1239 if (!fp) {
1240 fc_lport_error(lport, fp);
1241 return;
1242 }
1243
1244 if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, cmd,
1245 fc_lport_ns_resp,
1246 lport, 3 * lport->r_a_tov))
1247 fc_lport_error(lport, fp);
1248 }
1249
1250 static struct fc_rport_operations fc_lport_rport_ops = {
1251 .event_callback = fc_lport_rport_callback,
1252 };
1253
1254 /**
1255 * fc_lport_enter_dns() - Create an fc_rport for the name server
1256 * @lport: The local port requesting a remote port for the name server
1257 *
1258 * Locking Note: The lport lock is expected to be held before calling
1259 * this routine.
1260 */
1261 static void fc_lport_enter_dns(struct fc_lport *lport)
1262 {
1263 struct fc_rport_priv *rdata;
1264
1265 FC_LPORT_DBG(lport, "Entered DNS state from %s state\n",
1266 fc_lport_state(lport));
1267
1268 fc_lport_state_enter(lport, LPORT_ST_DNS);
1269
1270 mutex_lock(&lport->disc.disc_mutex);
1271 rdata = lport->tt.rport_create(lport, FC_FID_DIR_SERV);
1272 mutex_unlock(&lport->disc.disc_mutex);
1273 if (!rdata)
1274 goto err;
1275
1276 rdata->ops = &fc_lport_rport_ops;
1277 lport->tt.rport_login(rdata);
1278 return;
1279
1280 err:
1281 fc_lport_error(lport, NULL);
1282 }
1283
1284 /**
1285 * fc_lport_timeout() - Handler for the retry_work timer
1286 * @work: The work struct of the local port
1287 */
1288 static void fc_lport_timeout(struct work_struct *work)
1289 {
1290 struct fc_lport *lport =
1291 container_of(work, struct fc_lport,
1292 retry_work.work);
1293
1294 mutex_lock(&lport->lp_mutex);
1295
1296 switch (lport->state) {
1297 case LPORT_ST_DISABLED:
1298 WARN_ON(1);
1299 break;
1300 case LPORT_ST_READY:
1301 WARN_ON(1);
1302 break;
1303 case LPORT_ST_RESET:
1304 break;
1305 case LPORT_ST_FLOGI:
1306 fc_lport_enter_flogi(lport);
1307 break;
1308 case LPORT_ST_DNS:
1309 fc_lport_enter_dns(lport);
1310 break;
1311 case LPORT_ST_RNN_ID:
1312 case LPORT_ST_RSNN_NN:
1313 case LPORT_ST_RSPN_ID:
1314 case LPORT_ST_RFT_ID:
1315 case LPORT_ST_RFF_ID:
1316 fc_lport_enter_ns(lport, lport->state);
1317 break;
1318 case LPORT_ST_SCR:
1319 fc_lport_enter_scr(lport);
1320 break;
1321 case LPORT_ST_LOGO:
1322 fc_lport_enter_logo(lport);
1323 break;
1324 }
1325
1326 mutex_unlock(&lport->lp_mutex);
1327 }
1328
1329 /**
1330 * fc_lport_logo_resp() - Handle response to LOGO request
1331 * @sp: The sequence that the LOGO was on
1332 * @fp: The LOGO response frame
1333 * @lp_arg: The lport that sent the LOGO request
1334 *
1335 * Locking Note: This function will be called without the lport lock
1336 * held, but it will lock, call an _enter_* function or fc_lport_error()
1337 * and then unlock the lport.
1338 */
1339 void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
1340 void *lp_arg)
1341 {
1342 struct fc_lport *lport = lp_arg;
1343 u8 op;
1344
1345 FC_LPORT_DBG(lport, "Received a LOGO %s\n", fc_els_resp_type(fp));
1346
1347 if (fp == ERR_PTR(-FC_EX_CLOSED))
1348 return;
1349
1350 mutex_lock(&lport->lp_mutex);
1351
1352 if (lport->state != LPORT_ST_LOGO) {
1353 FC_LPORT_DBG(lport, "Received a LOGO response, but in state "
1354 "%s\n", fc_lport_state(lport));
1355 if (IS_ERR(fp))
1356 goto err;
1357 goto out;
1358 }
1359
1360 if (IS_ERR(fp)) {
1361 fc_lport_error(lport, fp);
1362 goto err;
1363 }
1364
1365 op = fc_frame_payload_op(fp);
1366 if (op == ELS_LS_ACC)
1367 fc_lport_enter_disabled(lport);
1368 else
1369 fc_lport_error(lport, fp);
1370
1371 out:
1372 fc_frame_free(fp);
1373 err:
1374 mutex_unlock(&lport->lp_mutex);
1375 }
1376 EXPORT_SYMBOL(fc_lport_logo_resp);
1377
1378 /**
1379 * fc_lport_enter_logo() - Logout of the fabric
1380 * @lport: The local port to be logged out
1381 *
1382 * Locking Note: The lport lock is expected to be held before calling
1383 * this routine.
1384 */
1385 static void fc_lport_enter_logo(struct fc_lport *lport)
1386 {
1387 struct fc_frame *fp;
1388 struct fc_els_logo *logo;
1389
1390 FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n",
1391 fc_lport_state(lport));
1392
1393 fc_lport_state_enter(lport, LPORT_ST_LOGO);
1394 fc_vports_linkchange(lport);
1395
1396 fp = fc_frame_alloc(lport, sizeof(*logo));
1397 if (!fp) {
1398 fc_lport_error(lport, fp);
1399 return;
1400 }
1401
1402 if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO,
1403 fc_lport_logo_resp, lport,
1404 2 * lport->r_a_tov))
1405 fc_lport_error(lport, NULL);
1406 }
1407
1408 /**
1409 * fc_lport_flogi_resp() - Handle response to FLOGI request
1410 * @sp: The sequence that the FLOGI was on
1411 * @fp: The FLOGI response frame
1412 * @lp_arg: The lport that received the FLOGI response
1413 *
1414 * Locking Note: This function will be called without the lport lock
1415 * held, but it will lock, call an _enter_* function or fc_lport_error()
1416 * and then unlock the lport.
1417 */
1418 void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1419 void *lp_arg)
1420 {
1421 struct fc_lport *lport = lp_arg;
1422 struct fc_els_flogi *flp;
1423 u32 did;
1424 u16 csp_flags;
1425 unsigned int r_a_tov;
1426 unsigned int e_d_tov;
1427 u16 mfs;
1428
1429 FC_LPORT_DBG(lport, "Received a FLOGI %s\n", fc_els_resp_type(fp));
1430
1431 if (fp == ERR_PTR(-FC_EX_CLOSED))
1432 return;
1433
1434 mutex_lock(&lport->lp_mutex);
1435
1436 if (lport->state != LPORT_ST_FLOGI) {
1437 FC_LPORT_DBG(lport, "Received a FLOGI response, but in state "
1438 "%s\n", fc_lport_state(lport));
1439 if (IS_ERR(fp))
1440 goto err;
1441 goto out;
1442 }
1443
1444 if (IS_ERR(fp)) {
1445 fc_lport_error(lport, fp);
1446 goto err;
1447 }
1448
1449 did = fc_frame_did(fp);
1450
1451 if (!did) {
1452 FC_LPORT_DBG(lport, "Bad FLOGI response\n");
1453 goto out;
1454 }
1455
1456 if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
1457 flp = fc_frame_payload_get(fp, sizeof(*flp));
1458 if (flp) {
1459 mfs = ntohs(flp->fl_csp.sp_bb_data) &
1460 FC_SP_BB_DATA_MASK;
1461 if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
1462 mfs < lport->mfs)
1463 lport->mfs = mfs;
1464 csp_flags = ntohs(flp->fl_csp.sp_features);
1465 r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
1466 e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
1467 if (csp_flags & FC_SP_FT_EDTR)
1468 e_d_tov /= 1000000;
1469
1470 lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC);
1471
1472 if ((csp_flags & FC_SP_FT_FPORT) == 0) {
1473 if (e_d_tov > lport->e_d_tov)
1474 lport->e_d_tov = e_d_tov;
1475 lport->r_a_tov = 2 * e_d_tov;
1476 fc_lport_set_port_id(lport, did, fp);
1477 printk(KERN_INFO "host%d: libfc: "
1478 "Port (%6.6x) entered "
1479 "point-to-point mode\n",
1480 lport->host->host_no, did);
1481 fc_lport_ptp_setup(lport, fc_frame_sid(fp),
1482 get_unaligned_be64(
1483 &flp->fl_wwpn),
1484 get_unaligned_be64(
1485 &flp->fl_wwnn));
1486 } else {
1487 lport->e_d_tov = e_d_tov;
1488 lport->r_a_tov = r_a_tov;
1489 fc_host_fabric_name(lport->host) =
1490 get_unaligned_be64(&flp->fl_wwnn);
1491 fc_lport_set_port_id(lport, did, fp);
1492 fc_lport_enter_dns(lport);
1493 }
1494 }
1495 } else
1496 fc_lport_error(lport, fp);
1497
1498 out:
1499 fc_frame_free(fp);
1500 err:
1501 mutex_unlock(&lport->lp_mutex);
1502 }
1503 EXPORT_SYMBOL(fc_lport_flogi_resp);
1504
1505 /**
1506 * fc_lport_enter_flogi() - Send a FLOGI request to the fabric manager
1507 * @lport: Fibre Channel local port to be logged in to the fabric
1508 *
1509 * Locking Note: The lport lock is expected to be held before calling
1510 * this routine.
1511 */
1512 void fc_lport_enter_flogi(struct fc_lport *lport)
1513 {
1514 struct fc_frame *fp;
1515
1516 FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n",
1517 fc_lport_state(lport));
1518
1519 fc_lport_state_enter(lport, LPORT_ST_FLOGI);
1520
1521 if (lport->point_to_multipoint) {
1522 if (lport->port_id)
1523 fc_lport_enter_ready(lport);
1524 return;
1525 }
1526
1527 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
1528 if (!fp)
1529 return fc_lport_error(lport, fp);
1530
1531 if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp,
1532 lport->vport ? ELS_FDISC : ELS_FLOGI,
1533 fc_lport_flogi_resp, lport,
1534 lport->vport ? 2 * lport->r_a_tov :
1535 lport->e_d_tov))
1536 fc_lport_error(lport, NULL);
1537 }
1538
1539 /**
1540 * fc_lport_config() - Configure a fc_lport
1541 * @lport: The local port to be configured
1542 */
1543 int fc_lport_config(struct fc_lport *lport)
1544 {
1545 INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
1546 mutex_init(&lport->lp_mutex);
1547
1548 fc_lport_state_enter(lport, LPORT_ST_DISABLED);
1549
1550 fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
1551 fc_lport_add_fc4_type(lport, FC_TYPE_CT);
1552
1553 return 0;
1554 }
1555 EXPORT_SYMBOL(fc_lport_config);
1556
1557 /**
1558 * fc_lport_init() - Initialize the lport layer for a local port
1559 * @lport: The local port to initialize the exchange layer for
1560 */
1561 int fc_lport_init(struct fc_lport *lport)
1562 {
1563 if (!lport->tt.lport_recv)
1564 lport->tt.lport_recv = fc_lport_recv_req;
1565
1566 if (!lport->tt.lport_reset)
1567 lport->tt.lport_reset = fc_lport_reset;
1568
1569 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
1570 fc_host_node_name(lport->host) = lport->wwnn;
1571 fc_host_port_name(lport->host) = lport->wwpn;
1572 fc_host_supported_classes(lport->host) = FC_COS_CLASS3;
1573 memset(fc_host_supported_fc4s(lport->host), 0,
1574 sizeof(fc_host_supported_fc4s(lport->host)));
1575 fc_host_supported_fc4s(lport->host)[2] = 1;
1576 fc_host_supported_fc4s(lport->host)[7] = 1;
1577
1578 /* This value is also unchanging */
1579 memset(fc_host_active_fc4s(lport->host), 0,
1580 sizeof(fc_host_active_fc4s(lport->host)));
1581 fc_host_active_fc4s(lport->host)[2] = 1;
1582 fc_host_active_fc4s(lport->host)[7] = 1;
1583 fc_host_maxframe_size(lport->host) = lport->mfs;
1584 fc_host_supported_speeds(lport->host) = 0;
1585 if (lport->link_supported_speeds & FC_PORTSPEED_1GBIT)
1586 fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
1587 if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
1588 fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
1589
1590 return 0;
1591 }
1592 EXPORT_SYMBOL(fc_lport_init);
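/*
 * A rough sketch of how a lower-level driver might use the lport entry
 * points exported from this file.  Allocating the Scsi_Host, the exchange
 * manager and the remaining libfc template hooks is driver specific and
 * omitted here:
 *
 *	struct fc_lport *lport = shost_priv(shost);
 *
 *	fc_lport_config(lport);		(mutex, retry work, FC-4 types)
 *	fc_set_mfs(lport, mfs);		(driver-determined max frame size)
 *	fc_lport_init(lport);		(fc_host attributes, tt defaults)
 *	fc_fabric_login(lport);		(arm the state machine)
 *	fc_linkup(lport);		(link event kicks off FLOGI)
 */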
1593
1594 /**
1595 * fc_lport_bsg_resp() - The common response handler for FC Passthrough requests
1596 * @sp: The sequence for the FC Passthrough response
1597 * @fp: The response frame
1598 * @info_arg: The BSG info that the response is for
1599 */
1600 static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
1601 void *info_arg)
1602 {
1603 struct fc_bsg_info *info = info_arg;
1604 struct fc_bsg_job *job = info->job;
1605 struct fc_lport *lport = info->lport;
1606 struct fc_frame_header *fh;
1607 size_t len;
1608 void *buf;
1609
1610 if (IS_ERR(fp)) {
1611 job->reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ?
1612 -ECONNABORTED : -ETIMEDOUT;
1613 job->reply_len = sizeof(uint32_t);
1614 job->state_flags |= FC_RQST_STATE_DONE;
1615 job->job_done(job);
1616 kfree(info);
1617 return;
1618 }
1619
1620 mutex_lock(&lport->lp_mutex);
1621 fh = fc_frame_header_get(fp);
1622 len = fr_len(fp) - sizeof(*fh);
1623 buf = fc_frame_payload_get(fp, 0);
1624
1625 if (fr_sof(fp) == FC_SOF_I3 && !ntohs(fh->fh_seq_cnt)) {
1626 /* Get the response code from the first frame payload */
1627 unsigned short cmd = (info->rsp_code == FC_FS_ACC) ?
1628 ntohs(((struct fc_ct_hdr *)buf)->ct_cmd) :
1629 (unsigned short)fc_frame_payload_op(fp);
1630
1631 /* Save the reply status of the job */
1632 job->reply->reply_data.ctels_reply.status =
1633 (cmd == info->rsp_code) ?
1634 FC_CTELS_STATUS_OK : FC_CTELS_STATUS_REJECT;
1635 }
1636
1637 job->reply->reply_payload_rcv_len +=
1638 fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents,
1639 &info->offset, KM_BIO_SRC_IRQ, NULL);
1640
1641 if (fr_eof(fp) == FC_EOF_T &&
1642 (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
1643 (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
1644 if (job->reply->reply_payload_rcv_len >
1645 job->reply_payload.payload_len)
1646 job->reply->reply_payload_rcv_len =
1647 job->reply_payload.payload_len;
1648 job->reply->result = 0;
1649 job->state_flags |= FC_RQST_STATE_DONE;
1650 job->job_done(job);
1651 kfree(info);
1652 }
1653 fc_frame_free(fp);
1654 mutex_unlock(&lport->lp_mutex);
1655 }
1656
1657 /**
1658 * fc_lport_els_request() - Send ELS passthrough request
1659 * @job: The BSG Passthrough job
1660 * @lport: The local port sending the request
1661 * @did: The destination port id
1662 *
1663 * Locking Note: The lport lock is expected to be held before calling
1664 * this routine.
1665 */
1666 static int fc_lport_els_request(struct fc_bsg_job *job,
1667 struct fc_lport *lport,
1668 u32 did, u32 tov)
1669 {
1670 struct fc_bsg_info *info;
1671 struct fc_frame *fp;
1672 struct fc_frame_header *fh;
1673 char *pp;
1674 int len;
1675
1676 fp = fc_frame_alloc(lport, job->request_payload.payload_len);
1677 if (!fp)
1678 return -ENOMEM;
1679
1680 len = job->request_payload.payload_len;
1681 pp = fc_frame_payload_get(fp, len);
1682
1683 sg_copy_to_buffer(job->request_payload.sg_list,
1684 job->request_payload.sg_cnt,
1685 pp, len);
1686
1687 fh = fc_frame_header_get(fp);
1688 fh->fh_r_ctl = FC_RCTL_ELS_REQ;
1689 hton24(fh->fh_d_id, did);
1690 hton24(fh->fh_s_id, lport->port_id);
1691 fh->fh_type = FC_TYPE_ELS;
1692 hton24(fh->fh_f_ctl, FC_FCTL_REQ);
1693 fh->fh_cs_ctl = 0;
1694 fh->fh_df_ctl = 0;
1695 fh->fh_parm_offset = 0;
1696
1697 info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
1698 if (!info) {
1699 fc_frame_free(fp);
1700 return -ENOMEM;
1701 }
1702
1703 info->job = job;
1704 info->lport = lport;
1705 info->rsp_code = ELS_LS_ACC;
1706 info->nents = job->reply_payload.sg_cnt;
1707 info->sg = job->reply_payload.sg_list;
1708
1709 if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
1710 NULL, info, tov))
1711 return -ECOMM;
1712 return 0;
1713 }
1714
1715 /**
1716 * fc_lport_ct_request() - Send CT Passthrough request
1717 * @job: The BSG Passthrough job
1718 * @lport: The local port sending the request
1719 * @did: The destination FC-ID
1720 * @tov: The timeout period to wait for the response
1721 *
1722 * Locking Note: The lport lock is expected to be held before calling
1723 * this routine.
1724 */
1725 static int fc_lport_ct_request(struct fc_bsg_job *job,
1726 struct fc_lport *lport, u32 did, u32 tov)
1727 {
1728 struct fc_bsg_info *info;
1729 struct fc_frame *fp;
1730 struct fc_frame_header *fh;
1731 struct fc_ct_req *ct;
1732 size_t len;
1733
1734 fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
1735 job->request_payload.payload_len);
1736 if (!fp)
1737 return -ENOMEM;
1738
1739 len = job->request_payload.payload_len;
1740 ct = fc_frame_payload_get(fp, len);
1741
1742 sg_copy_to_buffer(job->request_payload.sg_list,
1743 job->request_payload.sg_cnt,
1744 ct, len);
1745
1746 fh = fc_frame_header_get(fp);
1747 fh->fh_r_ctl = FC_RCTL_DD_UNSOL_CTL;
1748 hton24(fh->fh_d_id, did);
1749 hton24(fh->fh_s_id, lport->port_id);
1750 fh->fh_type = FC_TYPE_CT;
1751 hton24(fh->fh_f_ctl, FC_FCTL_REQ);
1752 fh->fh_cs_ctl = 0;
1753 fh->fh_df_ctl = 0;
1754 fh->fh_parm_offset = 0;
1755
1756 info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
1757 if (!info) {
1758 fc_frame_free(fp);
1759 return -ENOMEM;
1760 }
1761
1762 info->job = job;
1763 info->lport = lport;
1764 info->rsp_code = FC_FS_ACC;
1765 info->nents = job->reply_payload.sg_cnt;
1766 info->sg = job->reply_payload.sg_list;
1767
1768 if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
1769 NULL, info, tov))
1770 return -ECOMM;
1771 return 0;
1772 }
1773
1774 /**
1775 * fc_lport_bsg_request() - The common entry point for sending
1776 * FC Passthrough requests
1777 * @job: The BSG passthrough job
1778 */
1779 int fc_lport_bsg_request(struct fc_bsg_job *job)
1780 {
1781 struct request *rsp = job->req->next_rq;
1782 struct Scsi_Host *shost = job->shost;
1783 struct fc_lport *lport = shost_priv(shost);
1784 struct fc_rport *rport;
1785 struct fc_rport_priv *rdata;
1786 int rc = -EINVAL;
1787 u32 did;
1788
1789 job->reply->reply_payload_rcv_len = 0;
1790 if (rsp)
1791 rsp->resid_len = job->reply_payload.payload_len;
1792
1793 mutex_lock(&lport->lp_mutex);
1794
1795 switch (job->request->msgcode) {
1796 case FC_BSG_RPT_ELS:
1797 rport = job->rport;
1798 if (!rport)
1799 break;
1800
1801 rdata = rport->dd_data;
1802 rc = fc_lport_els_request(job, lport, rport->port_id,
1803 rdata->e_d_tov);
1804 break;
1805
1806 case FC_BSG_RPT_CT:
1807 rport = job->rport;
1808 if (!rport)
1809 break;
1810
1811 rdata = rport->dd_data;
1812 rc = fc_lport_ct_request(job, lport, rport->port_id,
1813 rdata->e_d_tov);
1814 break;
1815
1816 case FC_BSG_HST_CT:
1817 did = ntoh24(job->request->rqst_data.h_ct.port_id);
1818 if (did == FC_FID_DIR_SERV)
1819 rdata = lport->dns_rdata;
1820 else
1821 rdata = lport->tt.rport_lookup(lport, did);
1822
1823 if (!rdata)
1824 break;
1825
1826 rc = fc_lport_ct_request(job, lport, did, rdata->e_d_tov);
1827 break;
1828
1829 case FC_BSG_HST_ELS_NOLOGIN:
1830 did = ntoh24(job->request->rqst_data.h_els.port_id);
1831 rc = fc_lport_els_request(job, lport, did, lport->e_d_tov);
1832 break;
1833 }
1834
1835 mutex_unlock(&lport->lp_mutex);
1836 return rc;
1837 }
1838 EXPORT_SYMBOL(fc_lport_bsg_request);