/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2009-2012 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/interrupt.h>
#include <linux/mempool.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_bsg.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
struct lpfc_bsg_event {
	struct list_head node;
	struct kref kref;
	wait_queue_head_t wq;

	/* Event type and waiter identifiers */
	uint32_t type_mask;
	uint32_t req_id;
	uint32_t reg_id;

	/* next two flags are here for the auto-delete logic */
	unsigned long wait_time_stamp;
	int waiting;

	/* seen and not seen events */
	struct list_head events_to_get;
	struct list_head events_to_see;

	/* driver data associated with the job */
	void *dd_data;
};
struct lpfc_bsg_iocb {
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_dmabuf *rmp;
	struct lpfc_nodelist *ndlp;
};
struct lpfc_bsg_mbox {
	LPFC_MBOXQ_t *pmboxq;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
	uint8_t *ext; /* extended mailbox data */
	uint32_t mbOffset; /* from app */
	uint32_t inExtWLen; /* from app */
	uint32_t outExtWLen; /* from app */
};

#define MENLO_DID 0x0000FC0E
struct lpfc_bsg_menlo {
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_dmabuf *rmp;
};
#define TYPE_EVT	1
#define TYPE_IOCB	2
#define TYPE_MBOX	3
#define TYPE_MENLO	4
struct bsg_job_data {
	uint32_t type;
	struct fc_bsg_job *set_job; /* job waiting for this iocb to finish */
	union {
		struct lpfc_bsg_event *evt;
		struct lpfc_bsg_iocb iocb;
		struct lpfc_bsg_mbox mbox;
		struct lpfc_bsg_menlo menlo;
	} context_un;
};

struct event_data {
	struct list_head node;
	uint32_t type;
	uint32_t immed_dat;
	void *data;
	uint32_t len;
};
#define BUF_SZ_4K 4096
#define SLI_CT_ELX_LOOPBACK 0x10

enum ELX_LOOPBACK_CMD {
	ELX_LOOPBACK_XRI_SETUP,
	ELX_LOOPBACK_DATA,
};

#define ELX_LOOPBACK_HEADER_SZ \
	(size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
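/*
 * Note on the macro above: it computes the byte offset of the 'un'
 * payload member inside struct lpfc_sli_ct_request by taking the
 * address of that member through a NULL base pointer at compile time.
 * A minimal sketch of the equivalent, more conventional form (assuming
 * offsetof() is visible via the kernel headers included above):
 *
 *	#define ELX_LOOPBACK_HEADER_SZ \
 *		offsetof(struct lpfc_sli_ct_request, un)
 */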
struct lpfc_dmabufext {
	struct lpfc_dmabuf dma;
	uint32_t size;
	uint32_t flag;
};
static void
lpfc_free_bsg_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
{
	struct lpfc_dmabuf *mlast, *next_mlast;

	if (mlist) {
		list_for_each_entry_safe(mlast, next_mlast, &mlist->list,
					 list) {
			lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
			list_del(&mlast->list);
			kfree(mlast);
		}
		lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
		kfree(mlist);
	}
	return;
}
static struct lpfc_dmabuf *
lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size,
		       int outbound_buffers, struct ulp_bde64 *bpl,
		       int *bpl_entries)
{
	struct lpfc_dmabuf *mlist = NULL;
	struct lpfc_dmabuf *mp;
	unsigned int bytes_left = size;

	/* Verify we can support the size specified */
	if (!size || (size > (*bpl_entries * LPFC_BPL_SIZE)))
		return NULL;

	/* Determine the number of dma buffers to allocate */
	*bpl_entries = (size % LPFC_BPL_SIZE ? size/LPFC_BPL_SIZE + 1 :
			size/LPFC_BPL_SIZE);

	/* Allocate dma buffer and place in BPL passed */
	while (bytes_left) {
		/* Allocate dma buffer  */
		mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!mp) {
			if (mlist)
				lpfc_free_bsg_buffers(phba, mlist);
			return NULL;
		}

		INIT_LIST_HEAD(&mp->list);
		mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));

		if (!mp->virt) {
			kfree(mp);
			if (mlist)
				lpfc_free_bsg_buffers(phba, mlist);
			return NULL;
		}

		/* Queue it to a linked list */
		if (!mlist)
			mlist = mp;
		else
			list_add_tail(&mp->list, &mlist->list);

		/* Add buffer to buffer pointer list */
		if (outbound_buffers)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
		bpl->tus.f.bdeSize = (uint16_t)
			(bytes_left >= LPFC_BPL_SIZE ? LPFC_BPL_SIZE :
			 bytes_left);
		bytes_left -= bpl->tus.f.bdeSize;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
	}
	return mlist;
}
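/*
 * Usage sketch (illustrative only; 'phba' and 'payload_len' below are
 * placeholders for the sketch): a caller first points a BPL at a fresh
 * mbuf page, sizes the entry budget from the page, and pairs the
 * allocation with lpfc_free_bsg_buffers() for cleanup, mirroring the
 * callers later in this file:
 *
 *	struct ulp_bde64 *bpl = (struct ulp_bde64 *)bmp->virt;
 *	int entries = LPFC_BPL_SIZE / sizeof(struct ulp_bde64);
 *	struct lpfc_dmabuf *cmp;
 *
 *	cmp = lpfc_alloc_bsg_buffers(phba, payload_len, 1, bpl, &entries);
 *	if (!cmp)
 *		return -ENOMEM;
 *	...
 *	lpfc_free_bsg_buffers(phba, cmp);
 */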
static unsigned int
lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
		   struct fc_bsg_buffer *bsg_buffers,
		   unsigned int bytes_to_transfer, int to_buffers)
{
	struct lpfc_dmabuf *mp;
	unsigned int transfer_bytes, bytes_copied = 0;
	unsigned int sg_offset, dma_offset;
	unsigned char *dma_address, *sg_address;
	LIST_HEAD(temp_list);
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;
	int sg_valid;

	list_splice_init(&dma_buffers->list, &temp_list);
	list_add(&dma_buffers->list, &temp_list);
	sg_offset = 0;
	if (to_buffers)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;
	sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt,
		       sg_flags);
	local_irq_save(flags);
	sg_valid = sg_miter_next(&miter);
	list_for_each_entry(mp, &temp_list, list) {
		dma_offset = 0;
		while (bytes_to_transfer && sg_valid &&
		       (dma_offset < LPFC_BPL_SIZE)) {
			dma_address = mp->virt + dma_offset;
			if (sg_offset) {
				/* Continue previous partial transfer of sg */
				sg_address = miter.addr + sg_offset;
				transfer_bytes = miter.length - sg_offset;
			} else {
				sg_address = miter.addr;
				transfer_bytes = miter.length;
			}
			if (bytes_to_transfer < transfer_bytes)
				transfer_bytes = bytes_to_transfer;
			if (transfer_bytes > (LPFC_BPL_SIZE - dma_offset))
				transfer_bytes = LPFC_BPL_SIZE - dma_offset;
			if (to_buffers)
				memcpy(dma_address, sg_address, transfer_bytes);
			else
				memcpy(sg_address, dma_address, transfer_bytes);
			dma_offset += transfer_bytes;
			sg_offset += transfer_bytes;
			bytes_to_transfer -= transfer_bytes;
			bytes_copied += transfer_bytes;
			if (sg_offset >= miter.length) {
				sg_offset = 0;
				sg_valid = sg_miter_next(&miter);
			}
		}
	}
	sg_miter_stop(&miter);
	local_irq_restore(flags);
	list_del_init(&dma_buffers->list);
	list_splice(&temp_list, &dma_buffers->list);
	return bytes_copied;
}
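/*
 * Usage sketch (illustrative, mirroring the callers later in this
 * file): to stage a bsg request payload into the DMA chain, pass
 * to_buffers=1; to harvest a completed reply back into the bsg
 * scatter list, pass to_buffers=0:
 *
 *	lpfc_bsg_copy_data(cmp, &job->request_payload,
 *			   job->request_payload.payload_len, 1);
 *	...
 *	rcv_len = lpfc_bsg_copy_data(rmp, &job->reply_payload,
 *				     rsp_size, 0);
 */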
/**
 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_bsg_send_mgmt_cmd function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
			   struct lpfc_iocbq *cmdiocbq,
			   struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp, *cmp, *rmp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_bsg_iocb *iocb;
	unsigned long flags;
	unsigned int rsp_size;
	int rc = 0;

	dd_data = cmdiocbq->context1;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	iocb = &dd_data->context_un.iocb;
	ndlp = iocb->ndlp;
	rmp = iocb->rmp;
	cmp = cmdiocbq->context2;
	bmp = cmdiocbq->context3;
	rsp = &rspiocbq->iocb;

	/* Copy the completed data or set the error status */

	if (job) {
		if (rsp->ulpStatus) {
			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
				case IOERR_SEQUENCE_TIMEOUT:
					rc = -ETIMEDOUT;
					break;
				case IOERR_INVALID_RPI:
					rc = -EFAULT;
					break;
				default:
					rc = -EACCES;
					break;
				}
			} else {
				rc = -EACCES;
			}
		} else {
			rsp_size = rsp->un.genreq64.bdl.bdeSize;
			job->reply->reply_payload_rcv_len =
				lpfc_bsg_copy_data(rmp, &job->reply_payload,
						   rsp_size, 0);
		}
	}

	lpfc_free_bsg_buffers(phba, cmp);
	lpfc_free_bsg_buffers(phba, rmp);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		job->reply->result = rc;
		job->job_done(job);
	}
	return;
}
/**
 * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = job->rport->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct ulp_bde64 *bpl = NULL;
	uint32_t timeout;
	struct lpfc_iocbq *cmdiocbq = NULL;
	IOCB_t *cmd;
	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
	int request_nseg;
	int reply_nseg;
	struct bsg_job_data *dd_data;
	uint32_t creg_val;
	int rc = 0;
	int iocb_stat;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2733 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto no_ndlp;
	}

	if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
		rc = -ENODEV;
		goto free_ndlp;
	}

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		goto free_ndlp;
	}

	cmd = &cmdiocbq->iocb;

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto free_cmdiocbq;
	}
	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_bmp;
	}

	INIT_LIST_HEAD(&bmp->list);

	bpl = (struct ulp_bde64 *) bmp->virt;
	request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
				     1, bpl, &request_nseg);
	if (!cmp) {
		rc = -ENOMEM;
		goto free_bmp;
	}
	lpfc_bsg_copy_data(cmp, &job->request_payload,
			   job->request_payload.payload_len, 1);

	bpl += request_nseg;
	reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
	rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
				     bpl, &reply_nseg);
	if (!rmp) {
		rc = -ENOMEM;
		goto free_cmp;
	}

	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = ndlp->nlp_rpi;
	if (phba->sli_rev == LPFC_SLI_REV4)
		cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	cmd->ulpOwner = OWN_CHIP;
	cmdiocbq->vport = phba->pport;
	cmdiocbq->context3 = bmp;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	timeout = phba->fc_ratov * 2;
	cmd->ulpTimeout = timeout;

	cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context2 = cmp;
	cmdiocbq->context3 = bmp;
	cmdiocbq->context_un.ndlp = ndlp;
	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.ndlp = ndlp;
	dd_data->context_un.iocb.rmp = rmp;
	job->dd_data = dd_data;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -EIO;
			goto free_rmp;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
	if (iocb_stat == IOCB_SUCCESS)
		return 0; /* done for now */
	else if (iocb_stat == IOCB_BUSY)
		rc = -EAGAIN;
	else
		rc = -EIO;

	/* iocb failed so cleanup */

free_rmp:
	lpfc_free_bsg_buffers(phba, rmp);
free_cmp:
	lpfc_free_bsg_buffers(phba, cmp);
free_bmp:
	if (bmp->virt)
		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
free_cmdiocbq:
	lpfc_sli_release_iocbq(phba, cmdiocbq);
free_ndlp:
	lpfc_nlp_put(ndlp);
no_ndlp:
	kfree(dd_data);
no_dd_data:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
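/*
 * A note on the BPL built above (a sketch, assuming the single-page
 * BPL this function allocates): the command and reply scatter entries
 * share one LPFC_BPL_SIZE buffer list, so the GEN_REQUEST64 bdeSize is
 * simply the combined entry count scaled by the descriptor size:
 *
 *	bdl.bdeSize = (request_nseg + reply_nseg) *
 *		      sizeof(struct ulp_bde64);
 */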
/**
 * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_bsg_rport_els function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
		       struct lpfc_iocbq *cmdiocbq,
		       struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	IOCB_t *rsp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
	struct fc_bsg_ctels_reply *els_reply;
	uint8_t *rjt_data;
	unsigned long flags;
	unsigned int rsp_size;
	int rc = 0;

	dd_data = cmdiocbq->context1;
	ndlp = dd_data->context_un.iocb.ndlp;
	cmdiocbq->context1 = ndlp;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		/* Prevent timeout handling from trying to abort job  */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	rsp = &rspiocbq->iocb;
	pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
	prsp = (struct lpfc_dmabuf *)pcmd->list.next;

	/* Copy the completed job data or determine the job status if job is
	 * still active
	 */

	if (job) {
		if (rsp->ulpStatus == IOSTAT_SUCCESS) {
			rsp_size = rsp->un.elsreq64.bdl.bdeSize;
			job->reply->reply_payload_rcv_len =
				sg_copy_from_buffer(job->reply_payload.sg_list,
						    job->reply_payload.sg_cnt,
						    prsp->virt,
						    rsp_size);
		} else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
			job->reply->reply_payload_rcv_len =
				sizeof(struct fc_bsg_ctels_reply);
			/* LS_RJT data returned in word 4 */
			rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
			els_reply = &job->reply->reply_data.ctels_reply;
			els_reply->status = FC_CTELS_STATUS_REJECT;
			els_reply->rjt_data.action = rjt_data[3];
			els_reply->rjt_data.reason_code = rjt_data[2];
			els_reply->rjt_data.reason_explanation = rjt_data[1];
			els_reply->rjt_data.vendor_unique = rjt_data[0];
		} else {
			rc = -EIO;
		}
	}

	lpfc_nlp_put(ndlp);
	lpfc_els_free_iocb(phba, cmdiocbq);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		job->reply->result = rc;
		job->job_done(job);
	}
	return;
}
/**
 * lpfc_bsg_rport_els - send an ELS command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_rport_els(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = job->rport->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	uint32_t elscmd;
	uint32_t cmdsize;
	uint32_t rspsize;
	struct lpfc_iocbq *cmdiocbq;
	uint16_t rpi = 0;
	struct bsg_job_data *dd_data;
	uint32_t creg_val;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	/* verify the els command is not greater than the
	 * maximum ELS transfer size.
	 */

	if (job->request_payload.payload_len > FCELSSIZE) {
		rc = -EINVAL;
		goto no_dd_data;
	}

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2735 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	elscmd = job->request->rqst_data.r_els.els_code;
	cmdsize = job->request_payload.payload_len;
	rspsize = job->reply_payload.payload_len;

	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto free_dd_data;
	}

	/* We will use the allocated dma buffers by prep els iocb for command
	 * and response to ensure if the job times out and the request is freed,
	 * we won't be doing DMA into memory that is no longer allocated to the
	 * request.
	 */

	cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
				      ndlp->nlp_DID, elscmd);
	if (!cmdiocbq) {
		rc = -EIO;
		goto release_ndlp;
	}

	rpi = ndlp->nlp_rpi;

	/* Transfer the request payload to allocated command dma buffer */

	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  ((struct lpfc_dmabuf *)cmdiocbq->context2)->virt,
			  cmdsize);

	if (phba->sli_rev == LPFC_SLI_REV4)
		cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
	else
		cmdiocbq->iocb.ulpContext = rpi;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context_un.ndlp = ndlp;
	cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.ndlp = ndlp;
	dd_data->context_un.iocb.rmp = NULL;
	job->dd_data = dd_data;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -EIO;
			goto linkdown_err;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);

	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */
	else if (rc == IOCB_BUSY)
		rc = -EAGAIN;
	else
		rc = -EIO;

linkdown_err:
	cmdiocbq->context1 = ndlp;
	lpfc_els_free_iocb(phba, cmdiocbq);

release_ndlp:
	lpfc_nlp_put(ndlp);

free_dd_data:
	kfree(dd_data);

no_dd_data:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
/**
 * lpfc_bsg_event_free - frees an allocated event structure
 * @kref: Pointer to a kref.
 *
 * Called from kref_put. Back cast the kref into an event structure address.
 * Free any events to get, delete associated nodes, free any events to see,
 * free any data then free the event itself.
 **/
static void
lpfc_bsg_event_free(struct kref *kref)
{
	struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
						  kref);
	struct event_data *ed;

	list_del(&evt->node);

	while (!list_empty(&evt->events_to_get)) {
		ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	while (!list_empty(&evt->events_to_see)) {
		ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	kfree(evt->dd_data);
	kfree(evt);
}
/**
 * lpfc_bsg_event_ref - increments the kref for an event
 * @evt: Pointer to an event structure.
 **/
static void
lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
{
	kref_get(&evt->kref);
}

/**
 * lpfc_bsg_event_unref - Uses kref_put to free an event structure
 * @evt: Pointer to an event structure.
 **/
static void
lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
{
	kref_put(&evt->kref, lpfc_bsg_event_free);
}
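/*
 * The two helpers above are thin kref wrappers: when the final
 * lpfc_bsg_event_unref() drops the last reference, kref_put() invokes
 * lpfc_bsg_event_free() to tear down the event and its queued data.
 */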
/**
 * lpfc_bsg_event_new - allocate and initialize an event structure
 * @ev_mask: Mask of events.
 * @ev_reg_id: Event reg id.
 * @ev_req_id: Event request id.
 **/
static struct lpfc_bsg_event *
lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
{
	struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);

	if (!evt)
		return NULL;

	INIT_LIST_HEAD(&evt->events_to_get);
	INIT_LIST_HEAD(&evt->events_to_see);
	evt->type_mask = ev_mask;
	evt->req_id = ev_req_id;
	evt->reg_id = ev_reg_id;
	evt->wait_time_stamp = jiffies;
	evt->dd_data = NULL;
	init_waitqueue_head(&evt->wq);
	kref_init(&evt->kref);
	return evt;
}
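/*
 * Lifetime sketch for the event object (illustrative; 'reg_id' and
 * 'req_id' are placeholder values): the creator holds the initial
 * reference taken by kref_init(), each waiter takes its own, and every
 * path that is done with the event drops one:
 *
 *	struct lpfc_bsg_event *evt;
 *
 *	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, reg_id, req_id);
 *	if (!evt)
 *		return -ENOMEM;
 *	lpfc_bsg_event_ref(evt);	 (waiter's reference)
 *	...
 *	lpfc_bsg_event_unref(evt);	 (waiter done)
 *	lpfc_bsg_event_unref(evt);	 (creator done, frees evt)
 */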
/**
 * diag_cmd_data_free - Frees an lpfc dma buffer extension
 * @phba: Pointer to HBA context object.
 * @mlist: Pointer to an lpfc dma buffer extension.
 **/
static int
diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
{
	struct lpfc_dmabufext *mlast;
	struct pci_dev *pcidev;
	struct list_head head, *curr, *next;

	if ((!mlist) || (!lpfc_is_link_up(phba) &&
		(phba->link_flag & LS_LOOPBACK_MODE))) {
		return 0;
	}

	pcidev = phba->pcidev;
	list_add_tail(&head, &mlist->dma.list);

	list_for_each_safe(curr, next, &head) {
		mlast = list_entry(curr, struct lpfc_dmabufext, dma.list);
		if (mlast->dma.virt)
			dma_free_coherent(&pcidev->dev,
					  mlast->size,
					  mlast->dma.virt,
					  mlast->dma.phys);
		kfree(mlast);
	}
	return 0;
}
/**
 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to the driver internal I/O ring.
 * @piocbq: Pointer to the iocbq structure.
 *
 * This function is called when an unsolicited CT command is received. It
 * forwards the event to any processes registered to receive CT events.
 **/
void
lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocbq)
{
	uint32_t evt_req_id = 0;
	uint32_t cmd;
	uint32_t len;
	struct lpfc_dmabuf *dmabuf = NULL;
	struct lpfc_bsg_event *evt;
	struct event_data *evt_dat = NULL;
	struct lpfc_iocbq *iocbq;
	size_t offset = 0;
	struct list_head head;
	struct ulp_bde64 *bde;
	dma_addr_t dma_addr;
	int i;
	struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
	struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
	struct lpfc_hbq_entry *hbqe;
	struct lpfc_sli_ct_request *ct_req;
	struct fc_bsg_job *job = NULL;
	struct bsg_job_data *dd_data = NULL;
	unsigned long flags;
	int size = 0;

	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &piocbq->list);

	if (piocbq->iocb.ulpBdeCount == 0 ||
	    piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
		goto error_ct_unsol_exit;

	if (phba->link_state == LPFC_HBA_ERROR ||
		(!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
		goto error_ct_unsol_exit;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		dmabuf = bdeBuf1;
	else {
		dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
				    piocbq->iocb.un.cont64[0].addrLow);
		dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
	}
	if (dmabuf == NULL)
		goto error_ct_unsol_exit;
	ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
	evt_req_id = ct_req->FsType;
	cmd = ct_req->CommandResponse.bits.CmdRsp;
	len = ct_req->CommandResponse.bits.Size;
	if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
		lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (!(evt->type_mask & FC_REG_CT_EVENT) ||
			evt->req_id != evt_req_id)
			continue;

		lpfc_bsg_event_ref(evt);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
		if (evt_dat == NULL) {
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2614 Memory allocation failed for "
					"CT event\n");
			break;
		}

		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
			/* take accumulated byte count from the last iocbq */
			iocbq = list_entry(head.prev, typeof(*iocbq), list);
			evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
		} else {
			list_for_each_entry(iocbq, &head, list) {
				for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
					evt_dat->len +=
					iocbq->iocb.un.cont64[i].tus.f.bdeSize;
			}
		}

		evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
		if (evt_dat->data == NULL) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2615 Memory allocation failed for "
					"CT event data, size %d\n",
					evt_dat->len);
			kfree(evt_dat);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			goto error_ct_unsol_exit;
		}

		list_for_each_entry(iocbq, &head, list) {
			size = 0;
			if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
				bdeBuf1 = iocbq->context2;
				bdeBuf2 = iocbq->context3;
			}
			for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
				if (phba->sli3_options &
				    LPFC_SLI3_HBQ_ENABLED) {
					if (i == 0) {
						hbqe = (struct lpfc_hbq_entry *)
						  &iocbq->iocb.un.ulpWord[0];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf1;
					} else if (i == 1) {
						hbqe = (struct lpfc_hbq_entry *)
							&iocbq->iocb.unsli3.
							sli3Words[4];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf2;
					}
					if ((offset + size) > evt_dat->len)
						size = evt_dat->len - offset;
				} else {
					size = iocbq->iocb.un.cont64[i].
						tus.f.bdeSize;
					bde = &iocbq->iocb.un.cont64[i];
					dma_addr = getPaddr(bde->addrHigh,
							    bde->addrLow);
					dmabuf = lpfc_sli_ringpostbuf_get(phba,
							pring, dma_addr);
				}
				if (!dmabuf) {
					lpfc_printf_log(phba, KERN_ERR,
						LOG_LIBDFC, "2616 No dmabuf "
						"found for iocbq 0x%p\n",
						iocbq);
					kfree(evt_dat->data);
					kfree(evt_dat);
					spin_lock_irqsave(&phba->ct_ev_lock,
						flags);
					lpfc_bsg_event_unref(evt);
					spin_unlock_irqrestore(
						&phba->ct_ev_lock, flags);
					goto error_ct_unsol_exit;
				}
				memcpy((char *)(evt_dat->data) + offset,
				       dmabuf->virt, size);
				offset += size;
				if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
				    !(phba->sli3_options &
				      LPFC_SLI3_HBQ_ENABLED)) {
					lpfc_sli_ringpostbuf_put(phba, pring,
								 dmabuf);
				} else {
					switch (cmd) {
					case ELX_LOOPBACK_DATA:
						if (phba->sli_rev <
						    LPFC_SLI_REV4)
							diag_cmd_data_free(phba,
							(struct lpfc_dmabufext
							 *)dmabuf);
						break;
					case ELX_LOOPBACK_XRI_SETUP:
						if ((phba->sli_rev ==
							LPFC_SLI_REV2) ||
							(phba->sli3_options &
							LPFC_SLI3_HBQ_ENABLED
							)) {
							lpfc_in_buf_free(phba,
									dmabuf);
						} else {
							lpfc_post_buffer(phba,
									 pring,
									 1);
						}
						break;
					default:
						if (!(phba->sli3_options &
						      LPFC_SLI3_HBQ_ENABLED))
							lpfc_post_buffer(phba,
									 pring,
									 1);
						break;
					}
				}
			}
		}

		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		if (phba->sli_rev == LPFC_SLI_REV4) {
			evt_dat->immed_dat = phba->ctx_idx;
			phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX;
			/* Provide warning for over-run of the ct_ctx array */
			if (phba->ct_ctx[evt_dat->immed_dat].valid ==
			    UNSOL_VALID)
				lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
						"2717 CT context array entry "
						"[%d] over-run: oxid:x%x, "
						"sid:x%x\n", phba->ctx_idx,
						phba->ct_ctx[
						    evt_dat->immed_dat].oxid,
						phba->ct_ctx[
						    evt_dat->immed_dat].SID);
			phba->ct_ctx[evt_dat->immed_dat].rxid =
				piocbq->iocb.ulpContext;
			phba->ct_ctx[evt_dat->immed_dat].oxid =
				piocbq->iocb.unsli3.rcvsli3.ox_id;
			phba->ct_ctx[evt_dat->immed_dat].SID =
				piocbq->iocb.un.rcvels.remoteID;
			phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;
		} else
			evt_dat->immed_dat = piocbq->iocb.ulpContext;

		evt_dat->type = FC_REG_CT_EVENT;
		list_add(&evt_dat->node, &evt->events_to_see);
		if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
			wake_up_interruptible(&evt->wq);
			lpfc_bsg_event_unref(evt);
			break;
		}

		list_move(evt->events_to_see.prev, &evt->events_to_get);

		dd_data = (struct bsg_job_data *)evt->dd_data;
		job = dd_data->set_job;
		dd_data->set_job = NULL;
		lpfc_bsg_event_unref(evt);
		if (job) {
			job->reply->reply_payload_rcv_len = size;
			/* make error code available to userspace */
			job->reply->result = 0;
			job->dd_data = NULL;
			/* complete the job back to userspace */
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			job->job_done(job);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

error_ct_unsol_exit:
	if (!list_empty(&head))
		list_del(&head);
	if ((phba->sli_rev < LPFC_SLI_REV4) &&
	    (evt_req_id == SLI_CT_ELX_LOOPBACK))
		return;
	lpfc_in_buf_free(phba, dmabuf);
	return;
}
/**
 * lpfc_bsg_ct_unsol_abort - handle a CT abort sent to the management plane
 * @phba: Pointer to HBA context object.
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function handles an abort of a CT command that was destined for the
 * management plane.
 *
 * If a pending context for such a CT command is present, it clears the
 * context and returns 1 (handled); otherwise, it returns 0, indicating that
 * no context exists.
 **/
int
lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header fc_hdr;
	struct fc_frame_header *fc_hdr_ptr = &fc_hdr;
	int ctx_idx, handled = 0;
	uint16_t oxid, rxid;
	uint32_t sid;

	memcpy(fc_hdr_ptr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
	sid = sli4_sid_from_fc_hdr(fc_hdr_ptr);
	oxid = be16_to_cpu(fc_hdr_ptr->fh_ox_id);
	rxid = be16_to_cpu(fc_hdr_ptr->fh_rx_id);

	for (ctx_idx = 0; ctx_idx < LPFC_CT_CTX_MAX; ctx_idx++) {
		if (phba->ct_ctx[ctx_idx].valid != UNSOL_VALID)
			continue;
		if (phba->ct_ctx[ctx_idx].rxid != rxid)
			continue;
		if (phba->ct_ctx[ctx_idx].oxid != oxid)
			continue;
		if (phba->ct_ctx[ctx_idx].SID != sid)
			continue;
		phba->ct_ctx[ctx_idx].valid = UNSOL_INVALID;
		handled = 1;
	}
	return handled;
}
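/*
 * The abort match above is deliberately exact: an entry is invalidated
 * only when the frame's (rxid, oxid, sid) tuple matches a context
 * previously recorded by lpfc_bsg_ct_unsol_event(), so unrelated
 * exchanges in the ct_ctx array are left untouched.
 */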
/**
 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
 * @job: SET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct set_ct_event *event_req;
	struct lpfc_bsg_event *evt;
	int rc = 0;
	struct bsg_job_data *dd_data = NULL;
	uint32_t ev_mask;
	unsigned long flags;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2612 Received SET_CT_EVENT below minimum "
				"size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct set_ct_event *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
				FC_REG_EVENT_MASK);
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			dd_data = (struct bsg_job_data *)evt->dd_data;
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (&evt->node == &phba->ct_ev_waiters) {
		/* no event waiting struct yet - first call */
		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
		if (dd_data == NULL) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2734 Failed allocation of dd_data\n");
			rc = -ENOMEM;
			goto job_error;
		}
		evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
					 event_req->ev_req_id);
		if (!evt) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2617 Failed allocation of event "
					"waiter\n");
			rc = -ENOMEM;
			goto job_error;
		}
		dd_data->type = TYPE_EVT;
		dd_data->set_job = NULL;
		dd_data->context_un.evt = evt;
		evt->dd_data = (void *)dd_data;
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_add(&evt->node, &phba->ct_ev_waiters);
		lpfc_bsg_event_ref(evt);
		evt->wait_time_stamp = jiffies;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	evt->waiting = 1;
	dd_data->set_job = job; /* for unsolicited command */
	job->dd_data = dd_data; /* for fc transport timeout callback*/
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return 0; /* call job done later */

job_error:
	if (dd_data != NULL)
		kfree(dd_data);

	job->dd_data = NULL;
	return rc;
}
/**
 * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
 * @job: GET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct get_ct_event *event_req;
	struct get_ct_event_reply *event_reply;
	struct lpfc_bsg_event *evt;
	struct event_data *evt_dat = NULL;
	unsigned long flags;
	uint32_t rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2613 Received GET_CT_EVENT request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct get_ct_event *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	event_reply = (struct get_ct_event_reply *)
		job->reply->reply_data.vendor_reply.vendor_rsp;
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			if (list_empty(&evt->events_to_get))
				break;
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			evt_dat = list_entry(evt->events_to_get.prev,
					     struct event_data, node);
			list_del(&evt_dat->node);
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* The app may continue to ask for event data until it gets
	 * an error indicating that there isn't anymore
	 */
	if (evt_dat == NULL) {
		job->reply->reply_payload_rcv_len = 0;
		rc = -ENOENT;
		goto job_error;
	}

	if (evt_dat->len > job->request_payload.payload_len) {
		evt_dat->len = job->request_payload.payload_len;
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2618 Truncated event data at %d "
				"bytes\n",
				job->request_payload.payload_len);
	}

	event_reply->type = evt_dat->type;
	event_reply->immed_data = evt_dat->immed_dat;
	if (evt_dat->len > 0)
		job->reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->request_payload.sg_list,
					    job->request_payload.sg_cnt,
					    evt_dat->data, evt_dat->len);
	else
		job->reply->reply_payload_rcv_len = 0;

	if (evt_dat) {
		kfree(evt_dat->data);
		kfree(evt_dat);
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	job->dd_data = NULL;
	job->reply->result = 0;
	job->job_done(job);
	return 0;

job_error:
	job->dd_data = NULL;
	job->reply->result = rc;
	return rc;
}
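/*
 * Flow sketch for the two vendor commands above (illustrative, from
 * the application side): userspace first issues SET_EVENT to register
 * a waiter, then polls GET_EVENT to drain events_to_get. The app may
 * keep calling GET_EVENT until the -ENOENT style failure above signals
 * that no event data remains for its registration id.
 */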
/**
 * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_issue_ct_rsp function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
		      struct lpfc_iocbq *cmdiocbq,
		      struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp, *cmp;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	int rc = 0;

	dd_data = cmdiocbq->context1;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	ndlp = dd_data->context_un.iocb.ndlp;
	cmp = cmdiocbq->context2;
	bmp = cmdiocbq->context3;
	rsp = &rspiocbq->iocb;

	/* Copy the completed job data or set the error status */

	if (job) {
		if (rsp->ulpStatus) {
			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
				case IOERR_SEQUENCE_TIMEOUT:
					rc = -ETIMEDOUT;
					break;
				case IOERR_INVALID_RPI:
					rc = -EFAULT;
					break;
				default:
					rc = -EACCES;
					break;
				}
			} else {
				rc = -EACCES;
			}
		} else {
			job->reply->reply_payload_rcv_len = 0;
		}
	}

	lpfc_free_bsg_buffers(phba, cmp);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		job->reply->result = rc;
		job->job_done(job);
	}
	return;
}
/**
 * lpfc_issue_ct_rsp - issue a ct response
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the job object.
 * @tag: tag index value into the ports context exchange array.
 * @cmp: Pointer to a cmp dma buffer descriptor.
 * @bmp: Pointer to a dma buffer descriptor.
 * @num_entry: Number of entries in the bde.
 **/
static int
lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
		  struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
		  int num_entry)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *ctiocb = NULL;
	int rc = 0;
	struct lpfc_nodelist *ndlp = NULL;
	struct bsg_job_data *dd_data;
	uint32_t creg_val;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2736 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	/* Allocate buffer for command iocb */
	ctiocb = lpfc_sli_get_iocbq(phba);
	if (!ctiocb) {
		rc = -ENOMEM;
		goto no_ctiocb;
	}

	icmd = &ctiocb->iocb;
	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
	icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
	icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
	icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	/* Fill in rest of iocb */
	icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
	icmd->ulpBdeCount = 1;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* Do not issue unsol response if oxid not marked as valid */
		if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}
		icmd->ulpContext = phba->ct_ctx[tag].rxid;
		icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
		ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
		if (!ndlp) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
					"2721 ndlp null for oxid %x SID %x\n",
					icmd->ulpContext,
					phba->ct_ctx[tag].SID);
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		/* Check if the ndlp is active */
		if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		/* get a reference count so the ndlp doesn't go away while
		 * we respond
		 */
		if (!lpfc_nlp_get(ndlp)) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		icmd->un.ulpWord[3] =
				phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];

		/* The exchange is done, mark the entry as invalid */
		phba->ct_ctx[tag].valid = UNSOL_INVALID;
	} else
		icmd->ulpContext = (ushort) tag;

	icmd->ulpTimeout = phba->fc_ratov * 2;

	/* Xmit CT response on exchange <xid> */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
		"2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
		icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);

	ctiocb->iocb_cmpl = NULL;
	ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
	ctiocb->vport = phba->pport;
	ctiocb->context1 = dd_data;
	ctiocb->context2 = cmp;
	ctiocb->context3 = bmp;
	ctiocb->context_un.ndlp = ndlp;
	ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;

	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = ctiocb;
	dd_data->context_un.iocb.ndlp = ndlp;
	dd_data->context_un.iocb.rmp = NULL;
	job->dd_data = dd_data;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);

	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

issue_ct_rsp_exit:
	lpfc_sli_release_iocbq(phba, ctiocb);
no_ctiocb:
	kfree(dd_data);
no_dd_data:
	return rc;
}
/**
 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
 * @job: SEND_MGMT_RESP fc_bsg_job
 **/
static int
lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	struct ulp_bde64 *bpl;
	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL;
	int bpl_entries;
	uint32_t tag = mgmt_resp->tag;
	unsigned long reqbfrcnt =
			(unsigned long)job->request_payload.payload_len;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
		rc = -ERANGE;
		goto send_mgmt_rsp_exit;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_exit;
	}

	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_free_bmp;
	}

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;
	bpl_entries = (LPFC_BPL_SIZE/sizeof(struct ulp_bde64));
	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
				     1, bpl, &bpl_entries);
	if (!cmp) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_free_bmp;
	}
	lpfc_bsg_copy_data(cmp, &job->request_payload,
			   job->request_payload.payload_len, 1);

	rc = lpfc_issue_ct_rsp(phba, job, tag, cmp, bmp, bpl_entries);

	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	rc = -EACCES;

	lpfc_free_bsg_buffers(phba, cmp);

send_mgmt_rsp_free_bmp:
	if (bmp->virt)
		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
send_mgmt_rsp_exit:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
/**
 * lpfc_bsg_diag_mode_enter - process preparing into device diag loopback mode
 * @phba: Pointer to HBA context object.
 *
 * This function is responsible for preparing the driver for diag loopback
 * on device.
 **/
static int
lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	int i = 0;

	psli = &phba->sli;
	if (!psli)
		return -ENODEV;

	pring = &psli->ring[LPFC_FCP_RING];
	if (!pring)
		return -ENODEV;

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
		return -EACCES;

	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_block_requests(shost);
		}
		lpfc_destroy_vport_work_array(phba, vports);
	} else {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_block_requests(shost);
	}

	while (!list_empty(&pring->txcmplq)) {
		if (i++ > 500)	/* wait up to 5 seconds */
			break;
		msleep(10);
	}
	return 0;
}
/**
 * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
 * @phba: Pointer to HBA context object.
 *
 * This function is responsible for driver exit processing of setting up
 * diag loopback mode on device.
 **/
static void
lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
{
	struct Scsi_Host *shost;
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_unblock_requests(shost);
		}
		lpfc_destroy_vport_work_array(phba, vports);
	} else {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_unblock_requests(shost);
	}
	return;
}
/**
 * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
 * @phba: Pointer to HBA context object.
 * @job: LPFC_BSG_VENDOR_DIAG_MODE
 *
 * This function is responsible for placing an sli3 port into diagnostic
 * loopback mode in order to perform a diagnostic loopback test.
 * All new scsi requests are blocked, a small delay is used to allow the
 * scsi requests to complete, then the link is brought down. If the link
 * is placed in loopback mode then scsi requests are again allowed
 * so the scsi mid-layer doesn't give up on the port.
 * All of this is done in-line.
 **/
static int
lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
{
	struct diag_mode_set *loopback_mode;
	uint32_t link_flags;
	uint32_t timeout;
	LPFC_MBOXQ_t *pmboxq = NULL;
	int mbxstatus = MBX_SUCCESS;
	int i = 0;
	int rc = 0;

	/* no data to return just the return code */
	job->reply->reply_payload_rcv_len = 0;

	if (job->request_len < sizeof(struct fc_bsg_request) +
	    sizeof(struct diag_mode_set)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2738 Received DIAG MODE request size:%d "
				"below the minimum size:%d\n",
				job->request_len,
				(int)(sizeof(struct fc_bsg_request) +
				sizeof(struct diag_mode_set)));
		rc = -EINVAL;
		goto job_error;
	}

	rc = lpfc_bsg_diag_mode_enter(phba);
	if (rc)
		goto job_error;

	/* bring the link to diagnostic mode */
	loopback_mode = (struct diag_mode_set *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	link_flags = loopback_mode->type;
	timeout = loopback_mode->timeout * 100;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto loopback_mode_exit;
	}
	memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
	pmboxq->u.mb.mbxOwner = OWN_HOST;

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);

	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
		/* wait for link down before proceeding */
		i = 0;
		while (phba->link_state != LPFC_LINK_DOWN) {
			if (i++ > timeout) {
				rc = -ETIMEDOUT;
				goto loopback_mode_exit;
			}
			msleep(10);
		}

		memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		if (link_flags == INTERNAL_LOOP_BACK)
			pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
		else
			pmboxq->u.mb.un.varInitLnk.link_flags =
				FLAGS_TOPOLOGY_MODE_LOOP;

		pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
		pmboxq->u.mb.mbxOwner = OWN_HOST;

		mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
						     LPFC_MBOX_TMO);

		if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
			rc = -ENODEV;
		else {
			spin_lock_irq(&phba->hbalock);
			phba->link_flag |= LS_LOOPBACK_MODE;
			spin_unlock_irq(&phba->hbalock);
			/* wait for the link attention interrupt */
			msleep(100);

			i = 0;
			while (phba->link_state != LPFC_HBA_READY) {
				if (i++ > timeout) {
					rc = -ETIMEDOUT;
					break;
				}
				msleep(10);
			}
		}

	} else
		rc = -ENODEV;

loopback_mode_exit:
	lpfc_bsg_diag_mode_exit(phba);

	/*
	 * Let SLI layer release mboxq if mbox command completed after timeout.
	 */
	if (pmboxq && mbxstatus != MBX_TIMEOUT)
		mempool_free(pmboxq, phba->mbox_mem_pool);

job_error:
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		job->job_done(job);
	return rc;
}
/**
 * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
 * @phba: Pointer to HBA context object.
 * @diag: Flag to set the link to diagnostic or normal operation state.
 *
 * This function is responsible for issuing a sli4 mailbox command for setting
 * link to either diag state or normal operation state.
 **/
int
lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
{
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_mbx_set_link_diag_state *link_diag_state;
	uint32_t req_len, alloc_len;
	int mbxstatus = MBX_SUCCESS, rc;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;

	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
		   sizeof(struct lpfc_sli4_cfg_mhdr));
	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				     LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
				     req_len, LPFC_SLI4_MBX_EMBED);
	if (alloc_len != req_len) {
		rc = -ENOMEM;
		goto link_diag_state_set_out;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"3128 Set link to diagnostic state:x%x (x%x/x%x)\n",
			diag, phba->sli4_hba.lnk_info.lnk_tp,
			phba->sli4_hba.lnk_info.lnk_no);

	link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
	bf_set(lpfc_mbx_set_diag_state_diag_bit_valid, &link_diag_state->u.req,
	       LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE);
	bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
	       phba->sli4_hba.lnk_info.lnk_no);
	bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
	       phba->sli4_hba.lnk_info.lnk_tp);
	if (diag)
		bf_set(lpfc_mbx_set_diag_state_diag,
		       &link_diag_state->u.req, 1);
	else
		bf_set(lpfc_mbx_set_diag_state_diag,
		       &link_diag_state->u.req, 0);

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);

	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
		rc = 0;
	else
		rc = -ENODEV;

link_diag_state_set_out:
	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
		mempool_free(pmboxq, phba->mbox_mem_pool);

	return rc;
}
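/*
 * Call-sequence sketch (illustrative): the SLI4 loopback paths later
 * in this file bracket their work with this helper, e.g.:
 *
 *	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);  (enter diag)
 *	if (rc)
 *		goto out;
 *	... run loopback setup / link diag test ...
 *	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);  (back to normal)
 */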
/**
 * lpfc_sli4_bsg_set_internal_loopback - set sli4 internal loopback diagnostic
 * @phba: Pointer to HBA context object.
 *
 * This function is responsible for issuing a sli4 mailbox command for setting
 * up internal loopback diagnostic.
 **/
static int
lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	uint32_t req_len, alloc_len;
	struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
	int mbxstatus = MBX_SUCCESS, rc = 0;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;
	req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
		   sizeof(struct lpfc_sli4_cfg_mhdr));
	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
				req_len, LPFC_SLI4_MBX_EMBED);
	if (alloc_len != req_len) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return -ENOMEM;
	}
	link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
	bf_set(lpfc_mbx_set_diag_state_link_num,
	       &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_no);
	bf_set(lpfc_mbx_set_diag_state_link_type,
	       &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp);
	bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
	       LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
	if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3127 Failed setup loopback mode mailbox "
				"command, rc:x%x, status:x%x\n", mbxstatus,
				pmboxq->u.mb.mbxStatus);
		rc = -ENODEV;
	}
	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
		mempool_free(pmboxq, phba->mbox_mem_pool);
	return rc;
}
/**
 * lpfc_sli4_diag_fcport_reg_setup - setup port registrations for diagnostic
 * @phba: Pointer to HBA context object.
 *
 * This function sets up SLI4 FC port registrations for a diagnostic run,
 * which includes all the rpis, vfi, and also vpi.
 **/
static int
lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
{
	int rc;

	if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3136 Port still had vfi registered: "
				"mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n",
				phba->pport->fc_myDID, phba->fcf.fcfi,
				phba->sli4_hba.vfi_ids[phba->pport->vfi],
				phba->vpi_ids[phba->pport->vpi]);
		return -EINVAL;
	}
	rc = lpfc_issue_reg_vfi(phba->pport);
	return rc;
}
/**
 * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
 * @phba: Pointer to HBA context object.
 * @job: LPFC_BSG_VENDOR_DIAG_MODE
 *
 * This function is responsible for placing an sli4 port into diagnostic
 * loopback mode in order to perform a diagnostic loopback test.
 **/
static int
lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
{
	struct diag_mode_set *loopback_mode;
	uint32_t link_flags, timeout;
	int i, rc = 0;

	/* no data to return just the return code */
	job->reply->reply_payload_rcv_len = 0;

	if (job->request_len < sizeof(struct fc_bsg_request) +
	    sizeof(struct diag_mode_set)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3011 Received DIAG MODE request size:%d "
				"below the minimum size:%d\n",
				job->request_len,
				(int)(sizeof(struct fc_bsg_request) +
				sizeof(struct diag_mode_set)));
		rc = -EINVAL;
		goto job_error;
	}

	rc = lpfc_bsg_diag_mode_enter(phba);
	if (rc)
		goto job_error;

	/* indicate we are in loopback diagnostic mode */
	spin_lock_irq(&phba->hbalock);
	phba->link_flag |= LS_LOOPBACK_MODE;
	spin_unlock_irq(&phba->hbalock);

	/* reset port to start from scratch */
	rc = lpfc_selective_reset(phba);
	if (rc)
		goto job_error;

	/* bring the link to diagnostic mode */
	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"3129 Bring link to diagnostic state.\n");
	loopback_mode = (struct diag_mode_set *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	link_flags = loopback_mode->type;
	timeout = loopback_mode->timeout * 100;

	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3130 Failed to bring link to diagnostic "
				"state, rc:x%x\n", rc);
		goto loopback_mode_exit;
	}

	/* wait for link down before proceeding */
	i = 0;
	while (phba->link_state != LPFC_LINK_DOWN) {
		if (i++ > timeout) {
			rc = -ETIMEDOUT;
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3131 Timeout waiting for link to "
					"diagnostic mode, timeout:%d ms\n",
					timeout * 10);
			goto loopback_mode_exit;
		}
		msleep(10);
	}

	/* set up loopback mode */
	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"3132 Set up loopback mode:x%x\n", link_flags);

	if (link_flags == INTERNAL_LOOP_BACK)
		rc = lpfc_sli4_bsg_set_internal_loopback(phba);
	else if (link_flags == EXTERNAL_LOOP_BACK)
		rc = lpfc_hba_init_link_fc_topology(phba,
						    FLAGS_TOPOLOGY_MODE_PT_PT,
						    MBX_NOWAIT);
	else {
		rc = -EINVAL;
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"3141 Loopback mode:x%x not supported\n",
				link_flags);
		goto loopback_mode_exit;
	}

	if (!rc) {
		/* wait for the link attention interrupt */
		msleep(100);
		i = 0;
		while (phba->link_state < LPFC_LINK_UP) {
			if (i++ > timeout) {
				rc = -ETIMEDOUT;
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3137 Timeout waiting for link up "
					"in loopback mode, timeout:%d ms\n",
					timeout * 10);
				break;
			}
			msleep(10);
		}
	}

	/* port resource registration setup for loopback diagnostic */
	if (!rc) {
		/* set up a non-zero myDID for loopback test */
		phba->pport->fc_myDID = 1;
		rc = lpfc_sli4_diag_fcport_reg_setup(phba);
	} else
		goto loopback_mode_exit;

	if (!rc) {
		/* wait for the port ready */
		msleep(100);
		i = 0;
		while (phba->link_state != LPFC_HBA_READY) {
			if (i++ > timeout) {
				rc = -ETIMEDOUT;
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3133 Timeout waiting for port "
					"loopback mode ready, timeout:%d ms\n",
					timeout * 10);
				break;
			}
			msleep(10);
		}
	}

loopback_mode_exit:
	/* clear loopback diagnostic mode */
	if (rc) {
		spin_lock_irq(&phba->hbalock);
		phba->link_flag &= ~LS_LOOPBACK_MODE;
		spin_unlock_irq(&phba->hbalock);
	}
	lpfc_bsg_diag_mode_exit(phba);

job_error:
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		job->job_done(job);
	return rc;
}
/**
 * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
 * @job: LPFC_BSG_VENDOR_DIAG_MODE
 *
 * This function checks and dispatches a bsg diag command from the user to
 * the proper driver action routine.
 **/
static int
lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
{
	struct Scsi_Host *shost;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	int rc;

	shost = job->shost;
	if (!shost)
		return -ENODEV;
	vport = (struct lpfc_vport *)job->shost->hostdata;
	if (!vport)
		return -ENODEV;
	phba = vport->phba;
	if (!phba)
		return -ENODEV;

	if (phba->sli_rev < LPFC_SLI_REV4)
		rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
	else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
		 LPFC_SLI_INTF_IF_TYPE_2)
		rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
	else
		rc = -ENODEV;

	return rc;
}
/**
 * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
 * @job: LPFC_BSG_VENDOR_DIAG_MODE_END
 *
 * This function checks and dispatches a bsg diag command from the user to
 * the proper driver action routine.
 **/
static int
lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
{
	struct Scsi_Host *shost;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct diag_mode_set *loopback_mode_end_cmd;
	uint32_t timeout;
	int rc, i;

	shost = job->shost;
	if (!shost)
		return -ENODEV;
	vport = (struct lpfc_vport *)job->shost->hostdata;
	if (!vport)
		return -ENODEV;
	phba = vport->phba;
	if (!phba)
		return -ENODEV;

	if (phba->sli_rev < LPFC_SLI_REV4)
		return -ENODEV;
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_IF_TYPE_2)
		return -ENODEV;

	/* clear loopback diagnostic mode */
	spin_lock_irq(&phba->hbalock);
	phba->link_flag &= ~LS_LOOPBACK_MODE;
	spin_unlock_irq(&phba->hbalock);
	loopback_mode_end_cmd = (struct diag_mode_set *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	timeout = loopback_mode_end_cmd->timeout * 100;

	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3139 Failed to bring link to diagnostic "
				"state, rc:x%x\n", rc);
		goto loopback_mode_end_exit;
	}

	/* wait for link down before proceeding */
	i = 0;
	while (phba->link_state != LPFC_LINK_DOWN) {
		if (i++ > timeout) {
			rc = -ETIMEDOUT;
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3140 Timeout waiting for link to "
					"diagnostic mode_end, timeout:%d ms\n",
					timeout * 10);
			/* there is nothing much we can do here */
			break;
		}
		msleep(10);
	}

	/* reset port resource registrations */
	rc = lpfc_selective_reset(phba);
	phba->pport->fc_myDID = 0;

loopback_mode_end_exit:
	/* make return code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		job->job_done(job);
	return rc;
}
/**
 * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
 * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
 *
 * This function performs an SLI4 link diagnostic test requested by the
 * user application.
 **/
static int
lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
{
	struct Scsi_Host *shost;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	LPFC_MBOXQ_t *pmboxq;
	struct sli4_link_diag *link_diag_test_cmd;
	uint32_t req_len, alloc_len;
	uint32_t timeout;
	struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;
	struct diag_status *diag_status_reply;
	int mbxstatus, rc = 0;

	shost = job->shost;
	if (!shost) {
		rc = -ENODEV;
		goto job_error;
	}
	vport = (struct lpfc_vport *)job->shost->hostdata;
	if (!vport) {
		rc = -ENODEV;
		goto job_error;
	}
	phba = vport->phba;
	if (!phba) {
		rc = -ENODEV;
		goto job_error;
	}

	if (phba->sli_rev < LPFC_SLI_REV4) {
		rc = -ENODEV;
		goto job_error;
	}
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_IF_TYPE_2) {
		rc = -ENODEV;
		goto job_error;
	}

	if (job->request_len < sizeof(struct fc_bsg_request) +
	    sizeof(struct sli4_link_diag)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3013 Received LINK DIAG TEST request "
				" size:%d below the minimum size:%d\n",
				job->request_len,
				(int)(sizeof(struct fc_bsg_request) +
				sizeof(struct sli4_link_diag)));
		rc = -EINVAL;
		goto job_error;
	}

	rc = lpfc_bsg_diag_mode_enter(phba);
	if (rc)
		goto job_error;

	link_diag_test_cmd = (struct sli4_link_diag *)
			 job->request->rqst_data.h_vendor.vendor_cmd;
	timeout = link_diag_test_cmd->timeout * 100;

	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);

	if (rc)
		goto job_error;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto link_diag_test_exit;
	}

	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
		   sizeof(struct lpfc_sli4_cfg_mhdr));
	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				     LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
				     req_len, LPFC_SLI4_MBX_EMBED);
	if (alloc_len != req_len) {
		rc = -ENOMEM;
		goto link_diag_test_exit;
	}
	run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
	bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
	       phba->sli4_hba.lnk_info.lnk_no);
	bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
	       phba->sli4_hba.lnk_info.lnk_tp);
	bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
	       link_diag_test_cmd->test_id);
	bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
	       link_diag_test_cmd->loops);
	bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
	       link_diag_test_cmd->test_version);
	bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
	       link_diag_test_cmd->error_action);

	mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);

	shdr = (union lpfc_sli4_cfg_shdr *)
		&pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || mbxstatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"3010 Run link diag test mailbox failed with "
				"mbx_status x%x status x%x, add_status x%x\n",
				mbxstatus, shdr_status, shdr_add_status);
		rc = -ENODEV;
		goto link_diag_test_exit;
	}

	diag_status_reply = (struct diag_status *)
			    job->reply->reply_data.vendor_reply.vendor_rsp;

	if (job->reply_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3012 Received Run link diag test reply "
				"below minimum size (%d): reply_len:%d\n",
				(int)(sizeof(struct fc_bsg_request) +
				sizeof(struct diag_status)),
				job->reply_len);
		rc = -EINVAL;
		goto job_error;
	}

	diag_status_reply->mbox_status = mbxstatus;
	diag_status_reply->shdr_status = shdr_status;
	diag_status_reply->shdr_add_status = shdr_add_status;

link_diag_test_exit:
	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);

	if (pmboxq)
		mempool_free(pmboxq, phba->mbox_mem_pool);

	lpfc_bsg_diag_mode_exit(phba);

job_error:
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		job->job_done(job);
	return rc;
}
 * lpfcdiag_loop_self_reg - obtains a remote port login id
 * @phba: Pointer to HBA context object
 * @rpi: Pointer to a remote port login id
 *
 * This function obtains a remote port login id so the diag loopback test
 * can send and receive its own unsolicited CT command.
 */
static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_dmabuf *dmabuff;
	int status;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	if (phba->sli_rev < LPFC_SLI_REV4)
		status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
				(uint8_t *)&phba->pport->fc_sparam,
				mbox, *rpi);
	else {
		*rpi = lpfc_sli4_alloc_rpi(phba);
		status = lpfc_reg_rpi(phba, phba->pport->vpi,
				phba->pport->fc_myDID,
				(uint8_t *)&phba->pport->fc_sparam,
				mbox, *rpi);
	}

	if (status) {
		mempool_free(mbox, phba->mbox_mem_pool);
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_free_rpi(phba, *rpi);
		return -ENOMEM;
	}

	dmabuff = (struct lpfc_dmabuf *) mbox->context1;
	mbox->context1 = NULL;
	mbox->context2 = NULL;
	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);

	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
		lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
		kfree(dmabuff);
		if (status != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_free_rpi(phba, *rpi);
		return -ENODEV;
	}

	if (phba->sli_rev < LPFC_SLI_REV4)
		*rpi = mbox->u.mb.un.varWords[0];

	lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
	kfree(dmabuff);
	mempool_free(mbox, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfcdiag_loop_self_unreg - unregs from the rpi
 * @phba: Pointer to HBA context object
 * @rpi: Remote port login id
 *
 * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
 */
static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
{
	LPFC_MBOXQ_t *mbox;
	int status;

	/* Allocate mboxq structure */
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox == NULL)
		return -ENOMEM;

	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_unreg_login(phba, 0, rpi, mbox);
	else
		lpfc_unreg_login(phba, phba->pport->vpi,
				 phba->sli4_hba.rpi_ids[rpi], mbox);

	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);

	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
		if (status != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}
	mempool_free(mbox, phba->mbox_mem_pool);
	if (phba->sli_rev == LPFC_SLI_REV4)
		lpfc_sli4_free_rpi(phba, rpi);
	return 0;
}
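
/*
 * Usage sketch (illustrative only, not driver code): the self reg/unreg
 * pair brackets a loopback run.  The rpi returned by
 * lpfcdiag_loop_self_reg() carries the test traffic and must be released
 * through lpfcdiag_loop_self_unreg() on every exit path, exactly as
 * lpfc_bsg_diag_loopback_run() below does.
 */
#if 0	/* example only, assumes a caller holding a valid phba */
	uint16_t rpi = 0;
	int rc;

	rc = lpfcdiag_loop_self_reg(phba, &rpi);
	if (rc)
		return rc;
	/* ... send and receive loopback CT traffic on rpi ... */
	lpfcdiag_loop_self_unreg(phba, rpi);
#endif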
 * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
 * @phba: Pointer to HBA context object
 * @rpi: Remote port login id
 * @txxri: Pointer to transmit exchange id
 * @rxxri: Pointer to receive exchange id
 *
 * This function obtains the transmit and receive ids required to send
 * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp
 * flags are used so the unsolicited response handler is able to process
 * the ct command sent on the same port.
 */
static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
				 uint16_t *txxri, uint16_t *rxxri)
{
	struct lpfc_bsg_event *evt;
	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
	IOCB_t *cmd, *rsp;
	struct lpfc_dmabuf *dmabuf;
	struct ulp_bde64 *bpl = NULL;
	struct lpfc_sli_ct_request *ctreq = NULL;
	int ret_val = 0;
	int time_left;
	int iocb_stat = 0;
	unsigned long flags;

	*txxri = 0;
	*rxxri = 0;
	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
				 SLI_CT_ELX_LOOPBACK);
	if (!evt)
		return -ENOMEM;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_add(&evt->node, &phba->ct_ev_waiters);
	lpfc_bsg_event_ref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	rspiocbq = lpfc_sli_get_iocbq(phba);

	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (dmabuf) {
		dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
		if (dmabuf->virt) {
			INIT_LIST_HEAD(&dmabuf->list);
			bpl = (struct ulp_bde64 *) dmabuf->virt;
			memset(bpl, 0, sizeof(*bpl));
			ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
			bpl->addrHigh =
				le32_to_cpu(putPaddrHigh(dmabuf->phys +
							 sizeof(*bpl)));
			bpl->addrLow =
				le32_to_cpu(putPaddrLow(dmabuf->phys +
							sizeof(*bpl)));
			bpl->tus.f.bdeFlags = 0;
			bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);
		}
	}

	if (cmdiocbq == NULL || rspiocbq == NULL ||
	    dmabuf == NULL || bpl == NULL || ctreq == NULL ||
	    dmabuf->virt == NULL) {
		ret_val = -ENOMEM;
		goto err_get_xri_exit;
	}

	cmd = &cmdiocbq->iocb;
	rsp = &rspiocbq->iocb;

	memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);

	ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
	ctreq->RevisionId.bits.InId = 0;
	ctreq->FsType = SLI_CT_ELX_LOOPBACK;
	ctreq->FsSubType = 0;
	ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
	ctreq->CommandResponse.bits.Size = 0;

	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
	cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);

	cmd->un.xseq64.w5.hcsw.Fctl = LA;
	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = rpi;

	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->vport = phba->pport;

	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
					     rspiocbq,
					     (phba->fc_ratov * 2)
					     + LPFC_DRVR_TIMEOUT);
	if (iocb_stat) {
		ret_val = -EIO;
		goto err_get_xri_exit;
	}
	*txxri = rsp->ulpContext;

	evt->waiting = 1;
	evt->wait_time_stamp = jiffies;
	time_left = wait_event_interruptible_timeout(
		evt->wq, !list_empty(&evt->events_to_see),
		msecs_to_jiffies(1000 *
			((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
	if (list_empty(&evt->events_to_see))
		ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
	else {
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_move(evt->events_to_see.prev, &evt->events_to_get);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		*rxxri = (list_entry(evt->events_to_get.prev,
				     typeof(struct event_data),
				     node))->immed_dat;
	}
	evt->waiting = 0;

err_get_xri_exit:
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt); /* release ref */
	lpfc_bsg_event_unref(evt); /* delete */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (dmabuf) {
		if (dmabuf->virt)
			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}

	if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT))
		lpfc_sli_release_iocbq(phba, cmdiocbq);
	if (rspiocbq)
		lpfc_sli_release_iocbq(phba, rspiocbq);
	return ret_val;
}

/**
 * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffer
 * @phba: Pointer to HBA context object
 *
 * This function allocates a BSG_MBOX_SIZE (4KB) page size dma buffer and
 * returns the pointer to the buffer.
 */
static struct lpfc_dmabuf *
lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf;
	struct pci_dev *pcidev = phba->pcidev;

	/* allocate dma buffer struct */
	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return NULL;

	INIT_LIST_HEAD(&dmabuf->list);

	/* now, allocate dma buffer */
	dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
					  &(dmabuf->phys), GFP_KERNEL);

	if (!dmabuf->virt) {
		kfree(dmabuf);
		return NULL;
	}
	memset((uint8_t *)dmabuf->virt, 0, BSG_MBOX_SIZE);

	return dmabuf;
}

/**
 * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer
 * @phba: Pointer to HBA context object.
 * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor.
 *
 * This routine simply frees a dma buffer and its associated buffer
 * descriptor referred to by @dmabuf.
 */
static void
lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
{
	struct pci_dev *pcidev = phba->pcidev;

	if (!dmabuf)
		return;

	if (dmabuf->virt)
		dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
				  dmabuf->virt, dmabuf->phys);
	kfree(dmabuf);
	return;
}
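
/*
 * Minimal pairing sketch (illustrative only): each descriptor returned by
 * lpfc_bsg_dma_page_alloc() wraps one zeroed BSG_MBOX_SIZE coherent DMA
 * page and must go back through lpfc_bsg_dma_page_free(), which tolerates
 * both a NULL descriptor and a NULL virtual address.
 */
#if 0	/* example only */
	struct lpfc_dmabuf *page = lpfc_bsg_dma_page_alloc(phba);

	if (!page)
		return -ENOMEM;
	/* ... stage mailbox data in page->virt, hand page->phys to hw ... */
	lpfc_bsg_dma_page_free(phba, page);
#endif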

/**
 * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers
 * @phba: Pointer to HBA context object.
 * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs.
 *
 * This routine simply frees all dma buffers and their associated buffer
 * descriptors referred to by @dmabuf_list.
 */
static void
lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
			    struct list_head *dmabuf_list)
{
	struct lpfc_dmabuf *dmabuf, *next_dmabuf;

	if (list_empty(dmabuf_list))
		return;

	list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
		list_del_init(&dmabuf->list);
		lpfc_bsg_dma_page_free(phba, dmabuf);
	}
	return;
}

/**
 * diag_cmd_data_alloc - fills in a bde struct with dma buffers
 * @phba: Pointer to HBA context object
 * @bpl: Pointer to 64 bit bde structure
 * @size: Number of bytes to process
 * @nocopydata: Flag to copy user data into the allocated buffer
 *
 * This function allocates page size buffers and populates an lpfc_dmabufext.
 * If allowed, the user data pointed to with indataptr is copied into the
 * kernel memory. The chained list of page size buffers is returned.
 */
static struct lpfc_dmabufext *
diag_cmd_data_alloc(struct lpfc_hba *phba,
		    struct ulp_bde64 *bpl, uint32_t size,
		    int nocopydata)
{
	struct lpfc_dmabufext *mlist = NULL;
	struct lpfc_dmabufext *dmp;
	int cnt, offset = 0, i = 0;
	struct pci_dev *pcidev;

	pcidev = phba->pcidev;

	while (size) {
		/* We get chunks of 4K */
		if (size > BUF_SZ_4K)
			cnt = BUF_SZ_4K;
		else
			cnt = size;

		/* allocate struct lpfc_dmabufext buffer header */
		dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
		if (!dmp)
			goto out;

		INIT_LIST_HEAD(&dmp->dma.list);

		/* Queue it to a linked list */
		if (mlist)
			list_add_tail(&dmp->dma.list, &mlist->dma.list);
		else
			mlist = dmp;

		/* allocate buffer */
		dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
						   cnt,
						   &(dmp->dma.phys),
						   GFP_KERNEL);
		if (!dmp->dma.virt)
			goto out;

		dmp->size = cnt;

		if (nocopydata) {
			bpl->tus.f.bdeFlags = 0;
			pci_dma_sync_single_for_device(phba->pcidev,
				dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
		} else {
			memset((uint8_t *)dmp->dma.virt, 0, cnt);
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		}

		/* build buffer ptr list for IOCB */
		bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
		bpl->tus.f.bdeSize = (ushort) cnt;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
		i++;
		offset += cnt;
		size -= cnt;
	}

	mlist->flag = i;
	return mlist;
out:
	diag_cmd_data_free(phba, mlist);
	return NULL;
}
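
/*
 * Worked example (illustrative only): the allocator emits one BDE per 4K
 * chunk, so a 10000-byte request yields three chained buffers of 4096,
 * 4096 and 1808 bytes, with mlist->flag set to the BDE count (3) for the
 * caller to size the IOCB's BPL.  In the driver the BDE array itself
 * lives in an lpfc_mbuf; a stack array is used here only for brevity.
 */
#if 0	/* example only */
	struct ulp_bde64 bpl[3];
	struct lpfc_dmabufext *chain;

	chain = diag_cmd_data_alloc(phba, bpl, 10000, 0);
	if (!chain)
		return -ENOMEM;
	/* chain->flag == 3; bpl[0..2] now address the DMA chunks */
	diag_cmd_data_free(phba, chain);
#endif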
 * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
 * @phba: Pointer to HBA context object
 * @rxxri: Receive exchange id
 * @len: Number of data bytes
 *
 * This function allocates and posts a data buffer of sufficient size to
 * receive an unsolicited CT command.
 */
static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
				     size_t len)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
	struct lpfc_iocbq *cmdiocbq;
	IOCB_t *cmd = NULL;
	struct list_head head, *curr, *next;
	struct lpfc_dmabuf *rxbmp;
	struct lpfc_dmabuf *dmp;
	struct lpfc_dmabuf *mp[2] = {NULL, NULL};
	struct ulp_bde64 *rxbpl = NULL;
	uint32_t num_bde;
	struct lpfc_dmabufext *rxbuffer = NULL;
	int ret_val = 0;
	int iocb_stat;
	int i = 0;

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (rxbmp != NULL) {
		rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
		if (rxbmp->virt) {
			INIT_LIST_HEAD(&rxbmp->list);
			rxbpl = (struct ulp_bde64 *) rxbmp->virt;
			rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
		}
	}

	if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
		ret_val = -ENOMEM;
		goto err_post_rxbufs_exit;
	}

	/* Queue buffers for the receive exchange */
	num_bde = (uint32_t)rxbuffer->flag;
	dmp = &rxbuffer->dma;

	cmd = &cmdiocbq->iocb;
	i = 0;

	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &dmp->list);
	list_for_each_safe(curr, next, &head) {
		mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
		list_del(curr);

		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
			mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
			cmd->un.quexri64cx.buff.bde.addrHigh =
				putPaddrHigh(mp[i]->phys);
			cmd->un.quexri64cx.buff.bde.addrLow =
				putPaddrLow(mp[i]->phys);
			cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
				((struct lpfc_dmabufext *)mp[i])->size;
			cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
			cmd->ulpCommand = CMD_QUE_XRI64_CX;
			cmd->ulpPU = 0;
			cmd->ulpLe = 1;
			cmd->ulpBdeCount = 1;
			cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;
		} else {
			cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
			cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
			cmd->un.cont64[i].tus.f.bdeSize =
				((struct lpfc_dmabufext *)mp[i])->size;
			cmd->ulpBdeCount = ++i;

			if ((--num_bde > 0) && (i < 2))
				continue;

			cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
			cmd->ulpLe = 1;
		}

		cmd->ulpClass = CLASS3;
		cmd->ulpContext = rxxri;

		iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
						0);
		if (iocb_stat == IOCB_ERROR) {
			diag_cmd_data_free(phba,
					   (struct lpfc_dmabufext *)mp[0]);
			if (mp[1])
				diag_cmd_data_free(phba,
					   (struct lpfc_dmabufext *)mp[1]);
			dmp = list_entry(next, struct lpfc_dmabuf, list);
			ret_val = -EIO;
			goto err_post_rxbufs_exit;
		}

		lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
		if (mp[1]) {
			lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
			mp[1] = NULL;
		}

		/* The iocb was freed by lpfc_sli_issue_iocb */
		cmdiocbq = lpfc_sli_get_iocbq(phba);
		if (!cmdiocbq) {
			dmp = list_entry(next, struct lpfc_dmabuf, list);
			ret_val = -EIO;
			goto err_post_rxbufs_exit;
		}

		cmd = &cmdiocbq->iocb;
		i = 0;
	}
	list_del(&head);

err_post_rxbufs_exit:
	if (rxbmp) {
		if (rxbmp->virt)
			lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
		kfree(rxbmp);
	}

	if (cmdiocbq)
		lpfc_sli_release_iocbq(phba, cmdiocbq);
	return ret_val;
}

/**
 * lpfc_bsg_diag_loopback_run - run loopback on a port by issue ct cmd to itself
 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
 *
 * This function receives a user data buffer to be transmitted and received on
 * the same port, so the link must be up and in loopback mode prior to being
 * called:
 * 1. A kernel buffer is allocated to copy the user data into.
 * 2. The port registers with "itself".
 * 3. The transmit and receive exchange ids are obtained.
 * 4. The receive exchange id is posted.
 * 5. A new els loopback event is created.
 * 6. The command and response iocbs are allocated.
 * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback.
 *
 * This function is meant to be called n times while the port is in loopback,
 * so it is the app's responsibility to issue a reset to take the port out
 * of loopback mode.
 */
static int
lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct diag_mode_test *diag_mode;
	struct lpfc_bsg_event *evt;
	struct event_data *evdat;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t size;
	uint32_t full_size;
	size_t segment_len = 0, segment_offset = 0, current_offset = 0;
	uint16_t rpi = 0;
	struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
	IOCB_t *cmd, *rsp = NULL;
	struct lpfc_sli_ct_request *ctreq;
	struct lpfc_dmabuf *txbmp;
	struct ulp_bde64 *txbpl = NULL;
	struct lpfc_dmabufext *txbuffer = NULL;
	struct list_head head;
	struct lpfc_dmabuf *curr;
	uint16_t txxri = 0, rxxri;
	uint32_t num_bde;
	uint8_t *ptr = NULL, *rx_databuf = NULL;
	int rc = 0;
	int time_left;
	int iocb_stat;
	unsigned long flags;
	void *dataout = NULL;
	uint32_t total_mem;

	/* in case no data is returned return just the return code */
	job->reply->reply_payload_rcv_len = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2739 Received DIAG TEST request below minimum "
				"size\n");
		rc = -EINVAL;
		goto loopback_test_exit;
	}

	if (job->request_payload.payload_len !=
	    job->reply_payload.payload_len) {
		rc = -EINVAL;
		goto loopback_test_exit;
	}
	diag_mode = (struct diag_mode_test *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
		rc = -EACCES;
		goto loopback_test_exit;
	}

	if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
		rc = -EACCES;
		goto loopback_test_exit;
	}

	size = job->request_payload.payload_len;
	full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */

	if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
		rc = -ERANGE;
		goto loopback_test_exit;
	}

	if (full_size >= BUF_SZ_4K) {
		/*
		 * Allocate memory for ioctl data. If buffer is bigger than 64k,
		 * then we allocate 64k and re-use that buffer over and over to
		 * xfer the whole block. This is because Linux kernel has a
		 * problem allocating more than 120k of kernel space memory. Saw
		 * problem with GET_FCPTARGETMAPPING...
		 */
		if (size <= (64 * 1024))
			total_mem = full_size;
		else
			total_mem = 64 * 1024;
	} else
		/* Allocate memory for ioctl data */
		total_mem = BUF_SZ_4K;

	dataout = kmalloc(total_mem, GFP_KERNEL);
	if (dataout == NULL) {
		rc = -ENOMEM;
		goto loopback_test_exit;
	}

	ptr = dataout;
	ptr += ELX_LOOPBACK_HEADER_SZ;
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  ptr, size);
	rc = lpfcdiag_loop_self_reg(phba, &rpi);
	if (rc)
		goto loopback_test_exit;

	if (phba->sli_rev < LPFC_SLI_REV4) {
		rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
		if (rc) {
			lpfcdiag_loop_self_unreg(phba, rpi);
			goto loopback_test_exit;
		}

		rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
		if (rc) {
			lpfcdiag_loop_self_unreg(phba, rpi);
			goto loopback_test_exit;
		}
	}
	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
				 SLI_CT_ELX_LOOPBACK);
	if (!evt) {
		lpfcdiag_loop_self_unreg(phba, rpi);
		rc = -ENOMEM;
		goto loopback_test_exit;
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_add(&evt->node, &phba->ct_ev_waiters);
	lpfc_bsg_event_ref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (phba->sli_rev < LPFC_SLI_REV4)
		rspiocbq = lpfc_sli_get_iocbq(phba);
	txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);

	if (txbmp) {
		txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
		if (txbmp->virt) {
			INIT_LIST_HEAD(&txbmp->list);
			txbpl = (struct ulp_bde64 *) txbmp->virt;
			txbuffer = diag_cmd_data_alloc(phba,
						       txbpl, full_size, 0);
		}
	}

	if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) {
		rc = -ENOMEM;
		goto err_loopback_test_exit;
	}
	if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) {
		rc = -ENOMEM;
		goto err_loopback_test_exit;
	}

	cmd = &cmdiocbq->iocb;
	if (phba->sli_rev < LPFC_SLI_REV4)
		rsp = &rspiocbq->iocb;

	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &txbuffer->dma.list);
	list_for_each_entry(curr, &head, list) {
		segment_len = ((struct lpfc_dmabufext *)curr)->size;
		if (current_offset == 0) {
			ctreq = curr->virt;
			memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
			ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
			ctreq->RevisionId.bits.InId = 0;
			ctreq->FsType = SLI_CT_ELX_LOOPBACK;
			ctreq->FsSubType = 0;
			ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
			ctreq->CommandResponse.bits.Size = size;
			segment_offset = ELX_LOOPBACK_HEADER_SZ;
		} else
			segment_offset = 0;

		BUG_ON(segment_offset >= segment_len);
		memcpy(curr->virt + segment_offset,
		       ptr + current_offset,
		       segment_len - segment_offset);

		current_offset += segment_len - segment_offset;
		BUG_ON(current_offset > size);
	}
	list_del(&head);

	/* Build the XMIT_SEQUENCE iocb */
	num_bde = (uint32_t)txbuffer->flag;

	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
	cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));

	cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;

	if (phba->sli_rev < LPFC_SLI_REV4) {
		cmd->ulpContext = txxri;
	} else {
		cmd->un.xseq64.bdl.ulpIoTag32 = 0;
		cmd->un.ulpWord[3] = phba->sli4_hba.rpi_ids[rpi];
		cmdiocbq->context3 = txbmp;
		cmdiocbq->sli4_xritag = NO_XRI;
		cmd->unsli3.rcvsli3.ox_id = 0xffff;
	}
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->vport = phba->pport;
	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
					     rspiocbq, (phba->fc_ratov * 2) +
					     LPFC_DRVR_TIMEOUT);

	if ((iocb_stat != IOCB_SUCCESS) || ((phba->sli_rev < LPFC_SLI_REV4) &&
					    (rsp->ulpStatus != IOCB_SUCCESS))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"3126 Failed loopback test issue iocb: "
				"iocb_stat:x%x\n", iocb_stat);
		rc = -EIO;
		goto err_loopback_test_exit;
	}

	evt->waiting = 1;
	time_left = wait_event_interruptible_timeout(
		evt->wq, !list_empty(&evt->events_to_see),
		msecs_to_jiffies(1000 *
			((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
	evt->waiting = 0;
	if (list_empty(&evt->events_to_see)) {
		rc = (time_left) ? -EINTR : -ETIMEDOUT;
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"3125 Not receiving unsolicited event, "
				"rc:x%x\n", rc);
	} else {
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_move(evt->events_to_see.prev, &evt->events_to_get);
		evdat = list_entry(evt->events_to_get.prev,
				   typeof(*evdat), node);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		rx_databuf = evdat->data;
		if (evdat->len != full_size) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"1603 Loopback test did not receive expected "
				"data length. actual length 0x%x expected "
				"length 0x%x\n",
				evdat->len, full_size);
			rc = -EIO;
		} else if (rx_databuf == NULL)
			rc = -EIO;
		else {
			rc = IOCB_SUCCESS;
			/* skip over elx loopback header */
			rx_databuf += ELX_LOOPBACK_HEADER_SZ;
			job->reply->reply_payload_rcv_len =
				sg_copy_from_buffer(job->reply_payload.sg_list,
						    job->reply_payload.sg_cnt,
						    rx_databuf, size);
			job->reply->reply_payload_rcv_len = size;
		}
	}

err_loopback_test_exit:
	lpfcdiag_loop_self_unreg(phba, rpi);

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt); /* release ref */
	lpfc_bsg_event_unref(evt); /* delete */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (cmdiocbq != NULL)
		lpfc_sli_release_iocbq(phba, cmdiocbq);

	if (rspiocbq != NULL)
		lpfc_sli_release_iocbq(phba, rspiocbq);

	if (txbmp != NULL) {
		if (txbpl != NULL) {
			if (txbuffer != NULL)
				diag_cmd_data_free(phba, txbuffer);
			lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
		}
		kfree(txbmp);
	}

loopback_test_exit:
	kfree(dataout);
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	/* complete the job back to userspace if no error */
	if (rc == IOCB_SUCCESS)
		job->job_done(job);
	return rc;
}

/**
 * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
 * @job: GET_DFC_REV fc_bsg_job
 */
static int
lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct get_mgmt_rev *event_req;
	struct get_mgmt_rev_reply *event_reply;
	int rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2740 Received GET_DFC_REV request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct get_mgmt_rev *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	event_reply = (struct get_mgmt_rev_reply *)
		job->reply->reply_data.vendor_reply.vendor_rsp;

	if (job->reply_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2741 Received GET_DFC_REV reply below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
	event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
job_error:
	job->reply->result = rc;
	if (rc == 0)
		job->job_done(job);
	return rc;
}

/**
 * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is the completion handler function for mailbox commands issued from
 * the lpfc_bsg_issue_mbox function. It is called by the mailbox event
 * handler function with no lock held, and wakes up the thread waiting on
 * the wait queue pointed to by context1 of the mailbox.
 */
static void
lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	uint32_t size;
	unsigned long flags;
	uint8_t *pmb, *pmb_buf;

	dd_data = pmboxq->context1;

	/*
	 * The outgoing buffer is readily referred from the dma buffer,
	 * just need to get header part from mailboxq structure.
	 */
	pmb = (uint8_t *)&pmboxq->u.mb;
	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Copy the mailbox data to the job if it is still active */
	if (job) {
		size = job->reply_payload.payload_len;
		job->reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->reply_payload.sg_list,
					    job->reply_payload.sg_cnt,
					    pmb_buf, size);
	}

	dd_data->set_job = NULL;
	mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
	lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
	kfree(dd_data);

	/* Complete the job if the job is still active */
	if (job) {
		job->reply->result = 0;
		job->job_done(job);
	}
	return;
}

/**
 * lpfc_bsg_check_cmd_access - test for a supported mailbox command
 * @phba: Pointer to HBA context object.
 * @mb: Pointer to a mailbox object.
 * @vport: Pointer to a vport object.
 *
 * Some commands require the port to be offline, some may not be called from
 * the application.
 */
static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
	MAILBOX_t *mb, struct lpfc_vport *vport)
{
	/* return negative error values for bsg job */
	switch (mb->mbxCommand) {
	/* Offline only */
	case MBX_INIT_LINK:
	case MBX_DOWN_LINK:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_UNREG_LOGIN:
	case MBX_CLEAR_LA:
	case MBX_DUMP_CONTEXT:
	case MBX_RUN_DIAGS:
	case MBX_RESTART:
	case MBX_SET_MASK:
		if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2743 Command 0x%x is illegal in on-line "
				"state\n",
				mb->mbxCommand);
			return -EPERM;
		}
		break;
	case MBX_WRITE_NV:
	case MBX_WRITE_VPARMS:
	case MBX_LOAD_SM:
	case MBX_READ_NV:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_STATUS:
	case MBX_READ_XRI:
	case MBX_READ_REV:
	case MBX_READ_LNK_STAT:
	case MBX_DUMP_MEMORY:
	case MBX_DOWN_LOAD:
	case MBX_UPDATE_CFG:
	case MBX_KILL_BOARD:
	case MBX_LOAD_AREA:
	case MBX_LOAD_EXP_ROM:
	case MBX_BEACON:
	case MBX_DEL_LD_ENTRY:
	case MBX_SET_DEBUG:
	case MBX_WRITE_WWN:
	case MBX_SLI4_CONFIG:
	case MBX_READ_EVENT_LOG:
	case MBX_READ_EVENT_LOG_STATUS:
	case MBX_WRITE_EVENT_LOG:
	case MBX_PORT_CAPABILITIES:
	case MBX_PORT_IOV_CONTROL:
	case MBX_RUN_BIU_DIAG64:
		break;
	case MBX_SET_VARIABLE:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"1226 mbox: set_variable 0x%x, 0x%x\n",
			mb->un.varWords[0],
			mb->un.varWords[1]);
		if ((mb->un.varWords[0] == SETVAR_MLOMNT)
			&& (mb->un.varWords[1] == 1)) {
			phba->wait_4_mlo_maint_flg = 1;
		} else if (mb->un.varWords[0] == SETVAR_MLORST) {
			spin_lock_irq(&phba->hbalock);
			phba->link_flag &= ~LS_LOOPBACK_MODE;
			spin_unlock_irq(&phba->hbalock);
			phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
		}
		break;
	case MBX_READ_SPARM64:
	case MBX_READ_TOPOLOGY:
	case MBX_REG_LOGIN:
	case MBX_REG_LOGIN64:
	case MBX_CONFIG_PORT:
	case MBX_RUN_BIU_DIAG:
	default:
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
			"2742 Unknown Command 0x%x\n",
			mb->mbxCommand);
		return -EPERM;
	}

	return 0; /* ok */
}

/**
 * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session
 * @phba: Pointer to HBA context object.
 *
 * This routine cleans up and resets the BSG handling of a multi-buffer mbox
 * command session.
 */
static void
lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
{
	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
		return;

	/* free all memory, including dma buffers */
	lpfc_bsg_dma_page_list_free(phba,
				    &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
	lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
	/* multi-buffer write mailbox command pass-through complete */
	memset((char *)&phba->mbox_ext_buf_ctx, 0,
	       sizeof(struct lpfc_mbox_ext_buf_ctx));
	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);

	return;
}
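
/*
 * Session state sketch (derived from the handlers in this file): a
 * multi-buffer mailbox pass-through moves through
 *
 *   LPFC_BSG_MBOX_IDLE -> LPFC_BSG_MBOX_HOST  (new SLI_CONFIG accepted)
 *                      -> LPFC_BSG_MBOX_PORT  (command issued to the port)
 *                      -> LPFC_BSG_MBOX_DONE  (completion handled)
 *                      -> LPFC_BSG_MBOX_IDLE  (session reset, above)
 *
 * with LPFC_BSG_MBOX_ABTS entered from PORT when an abort is requested.
 */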

/**
 * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This routine handles the BSG job for mailbox command completions with
 * multiple external buffers.
 */
static struct fc_bsg_job *
lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	uint8_t *pmb, *pmb_buf;
	unsigned long flags;
	uint32_t size;
	int rc = 0;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	uint8_t *pmbx;

	dd_data = pmboxq->context1;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/*
	 * The outgoing buffer is readily referred from the dma buffer,
	 * just need to get header part from mailboxq structure.
	 */
	pmb = (uint8_t *)&pmboxq->u.mb;
	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
	/* Copy the byte swapped response mailbox back to the user */
	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
	/* if there is any non-embedded extended data copy that too */
	dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf;
	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
	if (!bsg_bf_get(lpfc_mbox_hdr_emb,
	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
		pmbx = (uint8_t *)dmabuf->virt;
		/* byte swap the extended data following the mailbox command */
		lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
			&pmbx[sizeof(MAILBOX_t)],
			sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
	}

	/* Complete the job if the job is still active */
	if (job) {
		size = job->reply_payload.payload_len;
		job->reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->reply_payload.sg_list,
					    job->reply_payload.sg_cnt,
					    pmb_buf, size);

		/* result for successful */
		job->reply->result = 0;

		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2937 SLI_CONFIG ext-buffer mailbox command "
				"(x%x/x%x) complete bsg job done, bsize:%d\n",
				phba->mbox_ext_buf_ctx.nembType,
				phba->mbox_ext_buf_ctx.mboxType, size);
		lpfc_idiag_mbxacc_dump_bsg_mbox(phba,
					phba->mbox_ext_buf_ctx.nembType,
					phba->mbox_ext_buf_ctx.mboxType,
					dma_ebuf, sta_pos_addr,
					phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"2938 SLI_CONFIG ext-buffer mailbox "
				"command (x%x/x%x) failure, rc:x%x\n",
				phba->mbox_ext_buf_ctx.nembType,
				phba->mbox_ext_buf_ctx.mboxType, rc);
	}

	/* state change */
	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
	kfree(dd_data);
	return job;
}

/**
 * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is the completion handler function for mailbox read commands with
 * multiple external buffers.
 */
static void
lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct fc_bsg_job *job;

	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);

	/* handle the BSG job with mailbox command */
	if (!job)
		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;

	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"2939 SLI_CONFIG ext-buffer rd mailbox command "
			"complete, ctxState:x%x, mbxStatus:x%x\n",
			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);

	if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
		lpfc_bsg_mbox_ext_session_reset(phba);

	/* free base driver mailbox structure memory */
	mempool_free(pmboxq, phba->mbox_mem_pool);

	/* if the job is still active, call job done */
	if (job)
		job->job_done(job);

	return;
}

/**
 * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is the completion handler function for mailbox write commands with
 * multiple external buffers.
 */
static void
lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct fc_bsg_job *job;

	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);

	/* handle the BSG job with the mailbox command */
	if (!job)
		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;

	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"2940 SLI_CONFIG ext-buffer wr mailbox command "
			"complete, ctxState:x%x, mbxStatus:x%x\n",
			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);

	/* free all memory, including dma buffers */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	lpfc_bsg_mbox_ext_session_reset(phba);

	/* if the job is still active, call job done */
	if (job)
		job->job_done(job);

	return;
}

/**
 * lpfc_bsg_sli_cfg_dma_desc_setup - set up a sli_config external buffer desc
 * @phba: Pointer to HBA context object.
 * @nemb_tp: Enumerate of non-embedded mailbox command type.
 * @index: Index of the external buffer descriptor to set up.
 * @mbx_dmabuf: Pointer to the non-embedded mailbox DMA buffer descriptor.
 * @ext_dmabuf: Pointer to the external DMA buffer descriptor.
 *
 * This routine fills in the address of one external buffer descriptor in a
 * non-embedded SLI_CONFIG mailbox command and logs its length and address.
 */
static void
lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
				uint32_t index, struct lpfc_dmabuf *mbx_dmabuf,
				struct lpfc_dmabuf *ext_dmabuf)
{
	struct lpfc_sli_config_mbox *sli_cfg_mbx;

	/* pointer to the start of mailbox command */
	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt;

	if (nemb_tp == nemb_mse) {
		if (index == 0) {
			sli_cfg_mbx->un.sli_config_emb0_subsys.
				mse[index].pa_hi =
				putPaddrHigh(mbx_dmabuf->phys +
					     sizeof(MAILBOX_t));
			sli_cfg_mbx->un.sli_config_emb0_subsys.
				mse[index].pa_lo =
				putPaddrLow(mbx_dmabuf->phys +
					    sizeof(MAILBOX_t));
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2943 SLI_CONFIG(mse)[%d], "
					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
					index,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].buf_len,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].pa_hi,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].pa_lo);
		} else {
			sli_cfg_mbx->un.sli_config_emb0_subsys.
				mse[index].pa_hi =
				putPaddrHigh(ext_dmabuf->phys);
			sli_cfg_mbx->un.sli_config_emb0_subsys.
				mse[index].pa_lo =
				putPaddrLow(ext_dmabuf->phys);
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2944 SLI_CONFIG(mse)[%d], "
					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
					index,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].buf_len,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].pa_hi,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].pa_lo);
		}
	} else {
		if (index == 0) {
			sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[index].pa_hi =
				putPaddrHigh(mbx_dmabuf->phys +
					     sizeof(MAILBOX_t));
			sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[index].pa_lo =
				putPaddrLow(mbx_dmabuf->phys +
					    sizeof(MAILBOX_t));
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3007 SLI_CONFIG(hbd)[%d], "
					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
					index,
					bsg_bf_get(
					lpfc_mbox_sli_config_ecmn_hbd_len,
					&sli_cfg_mbx->un.
					sli_config_emb1_subsys.hbd[index]),
					sli_cfg_mbx->un.sli_config_emb1_subsys.
					hbd[index].pa_hi,
					sli_cfg_mbx->un.sli_config_emb1_subsys.
					hbd[index].pa_lo);
		} else {
			sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[index].pa_hi =
				putPaddrHigh(ext_dmabuf->phys);
			sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[index].pa_lo =
				putPaddrLow(ext_dmabuf->phys);
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3008 SLI_CONFIG(hbd)[%d], "
					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
					index,
					bsg_bf_get(
					lpfc_mbox_sli_config_ecmn_hbd_len,
					&sli_cfg_mbx->un.
					sli_config_emb1_subsys.hbd[index]),
					sli_cfg_mbx->un.sli_config_emb1_subsys.
					hbd[index].pa_hi,
					sli_cfg_mbx->un.sli_config_emb1_subsys.
					hbd[index].pa_lo);
		}
	}
	return;
}
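
/*
 * Call sketch (illustrative only): descriptor index 0 reuses the mailbox
 * dma page itself, so its address points just past the MAILBOX_t header,
 * while every later index points at a dedicated external page.
 */
#if 0	/* example only */
	/* first buffer: payload shares the mailbox page */
	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_mse, 0, mbx_dmabuf,
					mbx_dmabuf);
	/* second buffer: its own page from lpfc_bsg_dma_page_alloc() */
	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_mse, 1, mbx_dmabuf,
					ext_dmabuf);
#endif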

/**
 * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job object.
 * @nemb_tp: Enumerate of non-embedded mailbox command type.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine performs SLI_CONFIG (0x9B) read mailbox command operation with
 * non-embedded external buffers.
 */
static int
lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
			      enum nemb_type nemb_tp,
			      struct lpfc_dmabuf *dmabuf)
{
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	struct dfc_mbox_req *mbox_req;
	struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
	uint32_t ext_buf_cnt, ext_buf_index;
	struct lpfc_dmabuf *ext_dmabuf = NULL;
	struct bsg_job_data *dd_data = NULL;
	LPFC_MBOXQ_t *pmboxq = NULL;
	MAILBOX_t *pmb;
	uint8_t *pmbx;
	int rc, i;

	mbox_req =
	   (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;

	/* pointer to the start of mailbox command */
	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;

	if (nemb_tp == nemb_mse) {
		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2945 Handled SLI_CONFIG(mse) rd, "
					"ext_buf_cnt(%d) out of range(%d)\n",
					ext_buf_cnt,
					LPFC_MBX_SLI_CONFIG_MAX_MSE);
			rc = -ERANGE;
			goto job_error;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2941 Handled SLI_CONFIG(mse) rd, "
				"ext_buf_cnt:%d\n", ext_buf_cnt);
	} else {
		/* sanity check on interface type for support */
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
		    LPFC_SLI_INTF_IF_TYPE_2) {
			rc = -ENODEV;
			goto job_error;
		}
		/* nemb_tp == nemb_hbd */
		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2946 Handled SLI_CONFIG(hbd) rd, "
					"ext_buf_cnt(%d) out of range(%d)\n",
					ext_buf_cnt,
					LPFC_MBX_SLI_CONFIG_MAX_HBD);
			rc = -ERANGE;
			goto job_error;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2942 Handled SLI_CONFIG(hbd) rd, "
				"ext_buf_cnt:%d\n", ext_buf_cnt);
	}

	/* before dma descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
					sta_pre_addr, dmabuf, ext_buf_cnt);

	/* reject a non-embedded mailbox command with no external buffer */
	if (ext_buf_cnt == 0) {
		rc = -EPERM;
		goto job_error;
	} else if (ext_buf_cnt > 1) {
		/* additional external read buffers */
		for (i = 1; i < ext_buf_cnt; i++) {
			ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
			if (!ext_dmabuf) {
				rc = -ENOMEM;
				goto job_error;
			}
			list_add_tail(&ext_dmabuf->list,
				      &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
		}
	}

	/* bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		rc = -ENOMEM;
		goto job_error;
	}

	/* mailbox command structure for base driver */
	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto job_error;
	}
	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));

	/* for the first external buffer */
	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);

	/* for the rest of external buffer descriptors if any */
	if (ext_buf_cnt > 1) {
		ext_buf_index = 1;
		list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
				&phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
			lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
							ext_buf_index, dmabuf,
							curr_dmabuf);
			ext_buf_index++;
		}
	}

	/* after dma descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
					sta_pos_addr, dmabuf, ext_buf_cnt);

	/* construct base driver mbox command */
	pmb = &pmboxq->u.mb;
	pmbx = (uint8_t *)dmabuf->virt;
	memcpy(pmb, pmbx, sizeof(*pmb));
	pmb->mbxOwner = OWN_HOST;
	pmboxq->vport = phba->pport;

	/* multi-buffer handling context */
	phba->mbox_ext_buf_ctx.nembType = nemb_tp;
	phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
	phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
	phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
	phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
	phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;

	/* callback for multi-buffer read mailbox command */
	pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;

	/* context fields to callback function */
	pmboxq->context1 = dd_data;
	dd_data->type = TYPE_MBOX;
	dd_data->set_job = job;
	dd_data->context_un.mbox.pmboxq = pmboxq;
	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
	job->dd_data = dd_data;

	/* state change */
	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;

	/*
	 * Non-embedded mailbox subcommand data gets byte swapped here because
	 * the lower level driver code only does the first 64 mailbox words.
	 */
	if ((!bsg_bf_get(lpfc_mbox_hdr_emb,
	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) &&
	    (nemb_tp == nemb_mse))
		lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
			&pmbx[sizeof(MAILBOX_t)],
			sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);

	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2947 Issued SLI_CONFIG ext-buffer "
				"mailbox command, rc:x%x\n", rc);
		return SLI_CONFIG_HANDLED;
	}
	lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
			"2948 Failed to issue SLI_CONFIG ext-buffer "
			"mailbox command, rc:x%x\n", rc);
	rc = -EPIPE;

job_error:
	if (pmboxq)
		mempool_free(pmboxq, phba->mbox_mem_pool);
	lpfc_bsg_dma_page_list_free(phba,
				    &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
	kfree(dd_data);
	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
	return rc;
}

/**
 * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job object.
 * @nemb_tp: Enumerate of non-embedded mailbox command type.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine performs SLI_CONFIG (0x9B) write mailbox command operation with
 * non-embedded external buffers.
 */
static int
lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
			       enum nemb_type nemb_tp,
			       struct lpfc_dmabuf *dmabuf)
{
	struct dfc_mbox_req *mbox_req;
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	uint32_t ext_buf_cnt;
	struct bsg_job_data *dd_data = NULL;
	LPFC_MBOXQ_t *pmboxq = NULL;
	MAILBOX_t *pmb;
	uint8_t *mbx;
	int rc = SLI_CONFIG_NOT_HANDLED, i;

	mbox_req =
	   (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;

	/* pointer to the start of mailbox command */
	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;

	if (nemb_tp == nemb_mse) {
		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2953 Failed SLI_CONFIG(mse) wr, "
					"ext_buf_cnt(%d) out of range(%d)\n",
					ext_buf_cnt,
					LPFC_MBX_SLI_CONFIG_MAX_MSE);
			return -ERANGE;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2949 Handled SLI_CONFIG(mse) wr, "
				"ext_buf_cnt:%d\n", ext_buf_cnt);
	} else {
		/* sanity check on interface type for support */
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
		    LPFC_SLI_INTF_IF_TYPE_2)
			return -ENODEV;
		/* nemb_tp == nemb_hbd */
		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2954 Failed SLI_CONFIG(hbd) wr, "
					"ext_buf_cnt(%d) out of range(%d)\n",
					ext_buf_cnt,
					LPFC_MBX_SLI_CONFIG_MAX_HBD);
			return -ERANGE;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2950 Handled SLI_CONFIG(hbd) wr, "
				"ext_buf_cnt:%d\n", ext_buf_cnt);
	}

	/* before dma buffer descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
					sta_pre_addr, dmabuf, ext_buf_cnt);

	if (ext_buf_cnt == 0)
		return -EPERM;

	/* for the first external buffer */
	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);

	/* after dma descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
					sta_pos_addr, dmabuf, ext_buf_cnt);

	/* log for looking forward */
	for (i = 1; i < ext_buf_cnt; i++) {
		if (nemb_tp == nemb_mse)
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
				i, sli_cfg_mbx->un.sli_config_emb0_subsys.
				mse[i].buf_len);
		else
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
				i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
				&sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[i]));
	}

	/* multi-buffer handling context */
	phba->mbox_ext_buf_ctx.nembType = nemb_tp;
	phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
	phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
	phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
	phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
	phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;

	if (ext_buf_cnt == 1) {
		/* bsg tracking structure */
		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
		if (!dd_data) {
			rc = -ENOMEM;
			goto job_error;
		}

		/* mailbox command structure for base driver */
		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!pmboxq) {
			rc = -ENOMEM;
			goto job_error;
		}
		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		pmb = &pmboxq->u.mb;
		mbx = (uint8_t *)dmabuf->virt;
		memcpy(pmb, mbx, sizeof(*pmb));
		pmb->mbxOwner = OWN_HOST;
		pmboxq->vport = phba->pport;

		/* callback for multi-buffer write mailbox command */
		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;

		/* context fields to callback function */
		pmboxq->context1 = dd_data;
		dd_data->type = TYPE_MBOX;
		dd_data->set_job = job;
		dd_data->context_un.mbox.pmboxq = pmboxq;
		dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
		job->dd_data = dd_data;

		/* state change */
		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2955 Issued SLI_CONFIG ext-buffer "
					"mailbox command, rc:x%x\n", rc);
			return SLI_CONFIG_HANDLED;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"2956 Failed to issue SLI_CONFIG ext-buffer "
				"mailbox command, rc:x%x\n", rc);
		rc = -EPIPE;
		goto job_error;
	}

	/* wait for additional external buffers */
	job->reply->result = 0;
	job->job_done(job);
	return SLI_CONFIG_HANDLED;

job_error:
	if (pmboxq)
		mempool_free(pmboxq, phba->mbox_mem_pool);
	kfree(dd_data);

	return rc;
}

/**
 * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job object.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine handles SLI_CONFIG (0x9B) mailbox command with non-embedded
 * external buffers, including both 0x9B with non-embedded MSEs and 0x9B
 * with embedded subsystem 0x1 and opcodes with external HBDs.
 */
static int
lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
			     struct lpfc_dmabuf *dmabuf)
{
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	uint32_t subsys;
	uint32_t opcode;
	int rc = SLI_CONFIG_NOT_HANDLED;

	/* state change on new multi-buffer pass-through mailbox command */
	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;

	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;

	if (!bsg_bf_get(lpfc_mbox_hdr_emb,
	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
		subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
				    &sli_cfg_mbx->un.sli_config_emb0_subsys);
		opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
				    &sli_cfg_mbx->un.sli_config_emb0_subsys);
		if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
			switch (opcode) {
			case FCOE_OPCODE_READ_FCF:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2957 Handled SLI_CONFIG "
						"subsys_fcoe, opcode:x%x\n",
						opcode);
				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
							nemb_mse, dmabuf);
				break;
			case FCOE_OPCODE_ADD_FCF:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2958 Handled SLI_CONFIG "
						"subsys_fcoe, opcode:x%x\n",
						opcode);
				rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
							nemb_mse, dmabuf);
				break;
			default:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2959 Reject SLI_CONFIG "
						"subsys_fcoe, opcode:x%x\n",
						opcode);
				rc = -EPERM;
				break;
			}
		} else if (subsys == SLI_CONFIG_SUBSYS_COMN) {
			switch (opcode) {
			case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
			case COMN_OPCODE_GET_CNTL_ATTRIBUTES:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"3106 Handled SLI_CONFIG "
						"subsys_comn, opcode:x%x\n",
						opcode);
				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
							nemb_mse, dmabuf);
				break;
			default:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"3107 Reject SLI_CONFIG "
						"subsys_comn, opcode:x%x\n",
						opcode);
				rc = -EPERM;
				break;
			}
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2977 Reject SLI_CONFIG "
					"subsys:x%d, opcode:x%x\n",
					subsys, opcode);
			rc = -EPERM;
		}
	} else {
		subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
				    &sli_cfg_mbx->un.sli_config_emb1_subsys);
		opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
				    &sli_cfg_mbx->un.sli_config_emb1_subsys);
		if (subsys == SLI_CONFIG_SUBSYS_COMN) {
			switch (opcode) {
			case COMN_OPCODE_READ_OBJECT:
			case COMN_OPCODE_READ_OBJECT_LIST:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2960 Handled SLI_CONFIG "
						"subsys_comn, opcode:x%x\n",
						opcode);
				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
							nemb_hbd, dmabuf);
				break;
			case COMN_OPCODE_WRITE_OBJECT:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2961 Handled SLI_CONFIG "
						"subsys_comn, opcode:x%x\n",
						opcode);
				rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
							nemb_hbd, dmabuf);
				break;
			default:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2962 Not handled SLI_CONFIG "
						"subsys_comn, opcode:x%x\n",
						opcode);
				rc = SLI_CONFIG_NOT_HANDLED;
				break;
			}
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2978 Not handled SLI_CONFIG "
					"subsys:x%d, opcode:x%x\n",
					subsys, opcode);
			rc = SLI_CONFIG_NOT_HANDLED;
		}
	}

	/* state reset on not handled new multi-buffer mailbox command */
	if (rc != SLI_CONFIG_HANDLED)
		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;

	return rc;
}

/**
 * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
 * @phba: Pointer to HBA context object.
 *
 * This routine is for requesting to abort a pass-through mailbox command with
 * multiple external buffers due to an error condition.
 */
static void
lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
{
	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
	else
		lpfc_bsg_mbox_ext_session_reset(phba);
	return;
}

/**
 * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job object.
 *
 * This routine extracts the next mailbox read external buffer back to
 * user space through BSG.
 */
static int
lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
{
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	struct lpfc_dmabuf *dmabuf;
	uint8_t *pbuf;
	uint32_t size;
	uint32_t index;

	index = phba->mbox_ext_buf_ctx.seqNum;
	phba->mbox_ext_buf_ctx.seqNum++;

	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
			phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;

	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
		size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
			&sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2963 SLI_CONFIG (mse) ext-buffer rd get "
				"buffer[%d], size:%d\n", index, size);
	} else {
		size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
			&sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2964 SLI_CONFIG (hbd) ext-buffer rd get "
				"buffer[%d], size:%d\n", index, size);
	}
	if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
		return -EPIPE;
	dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
				  struct lpfc_dmabuf, list);
	list_del_init(&dmabuf->list);

	/* after dma buffer descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
					mbox_rd, dma_ebuf, sta_pos_addr,
					dmabuf, index);

	pbuf = (uint8_t *)dmabuf->virt;
	job->reply->reply_payload_rcv_len =
		sg_copy_from_buffer(job->reply_payload.sg_list,
				    job->reply_payload.sg_cnt,
				    pbuf, size);

	lpfc_bsg_dma_page_free(phba, dmabuf);

	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2965 SLI_CONFIG (hbd) ext-buffer rd mbox "
				"command session done\n");
		lpfc_bsg_mbox_ext_session_reset(phba);
	}

	job->reply->result = 0;
	job->job_done(job);

	return SLI_CONFIG_HANDLED;
}
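
/*
 * Session flow note (derived from the code above): for an N-buffer read,
 * user space re-issues the BSG request once per remaining buffer; each
 * call copies out buffer seqNum and frees its dma page, and the session
 * resets itself once seqNum reaches numBuf.
 */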
/**
 * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job object.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine sets up the next mailbox write external buffer obtained
 * from user space through BSG.
 **/
static int
lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
			struct lpfc_dmabuf *dmabuf)
{
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	struct bsg_job_data *dd_data = NULL;
	LPFC_MBOXQ_t *pmboxq = NULL;
	MAILBOX_t *pmb;
	enum nemb_type nemb_tp;
	uint8_t *pbuf;
	uint32_t size;
	uint32_t index;
	int rc;

	index = phba->mbox_ext_buf_ctx.seqNum;
	phba->mbox_ext_buf_ctx.seqNum++;
	nemb_tp = phba->mbox_ext_buf_ctx.nembType;

	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
			phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;

	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		rc = -ENOMEM;
		goto job_error;
	}

	pbuf = (uint8_t *)dmabuf->virt;
	size = job->request_payload.payload_len;
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  pbuf, size);

	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2966 SLI_CONFIG (mse) ext-buffer wr set "
				"buffer[%d], size:%d\n",
				phba->mbox_ext_buf_ctx.seqNum, size);
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2967 SLI_CONFIG (hbd) ext-buffer wr set "
				"buffer[%d], size:%d\n",
				phba->mbox_ext_buf_ctx.seqNum, size);
	}

	/* set up external buffer descriptor and add to external buffer list */
	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
					phba->mbox_ext_buf_ctx.mbx_dmabuf,
					dmabuf);
	list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);

	/* after write dma buffer */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
					mbox_wr, dma_ebuf, sta_pos_addr,
					dmabuf, index);

	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2968 SLI_CONFIG ext-buffer wr all %d "
				"ebuffers received\n",
				phba->mbox_ext_buf_ctx.numBuf);
		/* mailbox command structure for base driver */
		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!pmboxq) {
			rc = -ENOMEM;
			goto job_error;
		}
		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
		pmb = &pmboxq->u.mb;
		memcpy(pmb, pbuf, sizeof(*pmb));
		pmb->mbxOwner = OWN_HOST;
		pmboxq->vport = phba->pport;

		/* callback for multi-buffer write mailbox command */
		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;

		/* context fields to callback function */
		pmboxq->context1 = dd_data;
		dd_data->type = TYPE_MBOX;
		dd_data->set_job = job;
		dd_data->context_un.mbox.pmboxq = pmboxq;
		dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
		job->dd_data = dd_data;

		/* state change */
		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;

		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2969 Issued SLI_CONFIG ext-buffer "
					"mailbox command, rc:x%x\n", rc);
			return SLI_CONFIG_HANDLED;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"2970 Failed to issue SLI_CONFIG ext-buffer "
				"mailbox command, rc:x%x\n", rc);
		rc = -EPIPE;
		goto job_error;
	}

	/* wait for additional external buffers */
	job->reply->result = 0;
	job->job_done(job);
	return SLI_CONFIG_HANDLED;

job_error:
	lpfc_bsg_dma_page_free(phba, dmabuf);
	kfree(dd_data);

	return rc;
}

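/*
 * Editorial note: only the final external buffer actually reaches the
 * hardware. Earlier buffers complete immediately with result 0; the last
 * one builds the LPFC_MBOXQ_t, moves the session to LPFC_BSG_MBOX_PORT,
 * and issues it MBX_NOWAIT so lpfc_bsg_issue_write_mbox_ext_cmpl() can
 * finish the job asynchronously.
 */
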
/**
 * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job object.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox
 * command with multiple non-embedded external buffers.
 **/
static int
lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
			     struct lpfc_dmabuf *dmabuf)
{
	int rc;

	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"2971 SLI_CONFIG buffer (type:x%x)\n",
			phba->mbox_ext_buf_ctx.mboxType);

	if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
		if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2972 SLI_CONFIG rd buffer state "
					"mismatch:x%x\n",
					phba->mbox_ext_buf_ctx.state);
			lpfc_bsg_mbox_ext_abort(phba);
			return -EPIPE;
		}
		rc = lpfc_bsg_read_ebuf_get(phba, job);
		if (rc == SLI_CONFIG_HANDLED)
			lpfc_bsg_dma_page_free(phba, dmabuf);
	} else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
		if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2973 SLI_CONFIG wr buffer state "
					"mismatch:x%x\n",
					phba->mbox_ext_buf_ctx.state);
			lpfc_bsg_mbox_ext_abort(phba);
			return -EPIPE;
		}
		rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
	}

	return rc;
}

/**
 * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job object.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine checks and handles non-embedded multi-buffer SLI_CONFIG
 * (0x9B) mailbox commands and external buffers.
 **/
static int
lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
			    struct lpfc_dmabuf *dmabuf)
{
	struct dfc_mbox_req *mbox_req;
	int rc = SLI_CONFIG_NOT_HANDLED;

	mbox_req =
	   (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;

	/* mbox command with/without single external buffer */
	if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
		return rc;

	/* mbox command and first external buffer */
	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
		if (mbox_req->extSeqNum == 1) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2974 SLI_CONFIG mailbox: tag:%d, "
					"seq:%d\n", mbox_req->extMboxTag,
					mbox_req->extSeqNum);
			rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
			return rc;
		} else
			goto sli_cfg_ext_error;
	}

	/*
	 * handle additional external buffers
	 */

	/* check broken pipe conditions */
	if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
		goto sli_cfg_ext_error;
	if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
		goto sli_cfg_ext_error;
	if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
		goto sli_cfg_ext_error;

	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"2975 SLI_CONFIG mailbox external buffer: "
			"extSta:x%x, tag:%d, seq:%d\n",
			phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
			mbox_req->extSeqNum);
	rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
	return rc;

sli_cfg_ext_error:
	/* all other cases, broken pipe */
	lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
			"2976 SLI_CONFIG mailbox broken pipe: "
			"ctxSta:x%x, ctxNumBuf:%d "
			"ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
			phba->mbox_ext_buf_ctx.state,
			phba->mbox_ext_buf_ctx.numBuf,
			phba->mbox_ext_buf_ctx.mbxTag,
			phba->mbox_ext_buf_ctx.seqNum,
			mbox_req->extMboxTag, mbox_req->extSeqNum);

	lpfc_bsg_mbox_ext_session_reset(phba);

	return -EPIPE;
}

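/*
 * Editorial sketch of the broken-pipe checks above: a follow-on buffer is
 * accepted only when all three conditions hold -
 *
 *	mbox_req->extMboxTag == ctx.mbxTag	(same session)
 *	mbox_req->extSeqNum  <= ctx.numBuf	(within advertised count)
 *	mbox_req->extSeqNum  == ctx.seqNum + 1	(strictly in order)
 *
 * anything else resets the session and returns -EPIPE to user space.
 */
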
/**
 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job object.
 * @vport: Pointer to a vport object.
 *
 * Allocate a tracking object, mailbox command memory, get a mailbox
 * from the mailbox pool, copy the caller mailbox command.
 *
 * If offline and the sli is active we need to poll for the command (port is
 * being reset) and complete the job, otherwise issue the mailbox command and
 * let our completion handler finish the command.
 **/
static int
lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
	struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
	MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
	/* a 4k buffer to hold the mb and extended data from/to the bsg */
	uint8_t *pmbx = NULL;
	struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
	struct lpfc_dmabuf *dmabuf = NULL;
	struct dfc_mbox_req *mbox_req;
	struct READ_EVENT_LOG_VAR *rdEventLog;
	uint32_t transmit_length, receive_length, mode;
	struct lpfc_mbx_sli4_config *sli4_config;
	struct lpfc_mbx_nembed_cmd *nembed_sge;
	struct mbox_header *header;
	struct ulp_bde64 *bde;
	uint8_t *ext = NULL;
	int rc = 0;
	uint8_t *from;
	uint32_t size;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	/* sanity check to protect driver */
	if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
	    job->request_payload.payload_len > BSG_MBOX_SIZE) {
		rc = -ERANGE;
		goto job_done;
	}

	/*
	 * Don't allow mailbox commands to be sent when blocked or when in
	 * the middle of discovery
	 */
	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
		rc = -EAGAIN;
		goto job_done;
	}

	mbox_req =
	    (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;

	/* check if requested extended data lengths are valid */
	if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
	    (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
		rc = -ERANGE;
		goto job_done;
	}

	dmabuf = lpfc_bsg_dma_page_alloc(phba);
	if (!dmabuf || !dmabuf->virt) {
		rc = -ENOMEM;
		goto job_done;
	}

	/* Get the mailbox command or external buffer from BSG */
	pmbx = (uint8_t *)dmabuf->virt;
	size = job->request_payload.payload_len;
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, pmbx, size);

	/* Handle possible SLI_CONFIG with non-embedded payloads */
	if (phba->sli_rev == LPFC_SLI_REV4) {
		rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
		if (rc == SLI_CONFIG_HANDLED)
			goto job_cont;
		if (rc)
			goto job_done;
		/* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
	}

	rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
	if (rc != 0)
		goto job_done; /* must be negative */

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2727 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto job_done;
	}

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto job_done;
	}
	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));

	pmb = &pmboxq->u.mb;
	memcpy(pmb, pmbx, sizeof(*pmb));
	pmb->mbxOwner = OWN_HOST;
	pmboxq->vport = vport;

	/* If HBA encountered an error attention, allow only DUMP
	 * or RESTART mailbox commands until the HBA is restarted.
	 */
	if (phba->pport->stopped &&
	    pmb->mbxCommand != MBX_DUMP_MEMORY &&
	    pmb->mbxCommand != MBX_RESTART &&
	    pmb->mbxCommand != MBX_WRITE_VPARMS &&
	    pmb->mbxCommand != MBX_WRITE_WWN)
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"2797 mbox: Issued mailbox cmd "
				"0x%x while in stopped state.\n",
				pmb->mbxCommand);

	/* extended mailbox commands will need an extended buffer */
	if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
		from = pmbx;
		ext = from + sizeof(MAILBOX_t);
		pmboxq->context2 = ext;
		pmboxq->in_ext_byte_len =
			mbox_req->inExtWLen * sizeof(uint32_t);
		pmboxq->out_ext_byte_len =
			mbox_req->outExtWLen * sizeof(uint32_t);
		pmboxq->mbox_offset_word = mbox_req->mbOffset;
	}

	/* biu diag will need a kernel buffer to transfer the data
	 * allocate our own buffer and setup the mailbox command to
	 * use ours
	 */
	if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
		transmit_length = pmb->un.varWords[1];
		receive_length = pmb->un.varWords[4];
		/* transmit length cannot be greater than receive length or
		 * mailbox extension size
		 */
		if ((transmit_length > receive_length) ||
		    (transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
			rc = -ERANGE;
			goto job_done;
		}
		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));

		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
	} else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
		rdEventLog = &pmb->un.varRdEventLog;
		receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
		mode = bf_get(lpfc_event_log, rdEventLog);

		/* receive length cannot be greater than mailbox
		 * extension size
		 */
		if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
			rc = -ERANGE;
			goto job_done;
		}

		/* mode zero uses a bde like biu diags command */
		if (mode == 0) {
			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
							+ sizeof(MAILBOX_t));
			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
							+ sizeof(MAILBOX_t));
		}
	} else if (phba->sli_rev == LPFC_SLI_REV4) {
		/* Let type 4 (well known data) through because the data is
		 * returned in varwords[4-8]
		 * otherwise check the receive length and fetch the buffer addr
		 */
		if ((pmb->mbxCommand == MBX_DUMP_MEMORY) &&
		    (pmb->un.varDmp.type != DMP_WELL_KNOWN)) {
			/* rebuild the command for sli4 using our own buffers
			 * like we do for biu diags
			 */
			receive_length = pmb->un.varWords[2];
			/* receive length cannot be greater than mailbox
			 * extension size
			 */
			if (receive_length == 0) {
				rc = -ERANGE;
				goto job_done;
			}
			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
						+ sizeof(MAILBOX_t));
			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
						+ sizeof(MAILBOX_t));
		} else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
			   pmb->un.varUpdateCfg.co) {
			bde = (struct ulp_bde64 *)&pmb->un.varWords[4];

			/* bde size cannot be greater than mailbox ext size */
			if (bde->tus.f.bdeSize >
			    BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
				rc = -ERANGE;
				goto job_done;
			}
			bde->addrHigh = putPaddrHigh(dmabuf->phys
						+ sizeof(MAILBOX_t));
			bde->addrLow = putPaddrLow(dmabuf->phys
						+ sizeof(MAILBOX_t));
		} else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
			/* Handling non-embedded SLI_CONFIG mailbox command */
			sli4_config = &pmboxq->u.mqe.un.sli4_config;
			if (!bf_get(lpfc_mbox_hdr_emb,
			    &sli4_config->header.cfg_mhdr)) {
				/* rebuild the command for sli4 using our
				 * own buffers like we do for biu diags
				 */
				header = (struct mbox_header *)
						&pmb->un.varWords[0];
				nembed_sge = (struct lpfc_mbx_nembed_cmd *)
						&pmb->un.varWords[0];
				receive_length = nembed_sge->sge[0].length;

				/* receive length cannot be greater than
				 * mailbox extension size
				 */
				if ((receive_length == 0) ||
				    (receive_length >
				     BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
					rc = -ERANGE;
					goto job_done;
				}

				nembed_sge->sge[0].pa_hi =
						putPaddrHigh(dmabuf->phys
						   + sizeof(MAILBOX_t));
				nembed_sge->sge[0].pa_lo =
						putPaddrLow(dmabuf->phys
						   + sizeof(MAILBOX_t));
			}
		}
	}

	dd_data->context_un.mbox.dmabuffers = dmabuf;

	/* setup wake call as IOCB callback */
	pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;

	/* setup context field to pass wait_queue pointer to wake function */
	pmboxq->context1 = dd_data;
	dd_data->type = TYPE_MBOX;
	dd_data->set_job = job;
	dd_data->context_un.mbox.pmboxq = pmboxq;
	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
	dd_data->context_un.mbox.ext = ext;
	dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
	dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
	dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
	job->dd_data = dd_data;

	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
	    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
			goto job_done;
		}

		/* job finished, copy the data */
		memcpy(pmbx, pmb, sizeof(*pmb));
		job->reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->reply_payload.sg_list,
					    job->reply_payload.sg_cnt,
					    pmbx, size);
		/* not waiting mbox already done */
		rc = 0;
		goto job_done;
	}

	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
		return 1; /* job started */

job_done:
	/* common exit for error or job completed inline */
	if (pmboxq)
		mempool_free(pmboxq, phba->mbox_mem_pool);
	lpfc_bsg_dma_page_free(phba, dmabuf);
	kfree(dd_data);

job_cont:
	return rc;
}

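/*
 * Editorial note on the buffer layout used above: the single DMA page from
 * lpfc_bsg_dma_page_alloc() holds the MAILBOX_t image at offset 0 with any
 * extension or payload data immediately behind it, which is why every
 * rebuilt BDE/SGE address is dmabuf->phys + sizeof(MAILBOX_t).
 */
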
/**
 * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
 * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
 **/
static int
lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct dfc_mbox_req *mbox_req;
	int rc = 0;

	/* mix-and-match backward compatibility */
	job->reply->reply_payload_rcv_len = 0;
	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2737 Mix-and-match backward compatibility "
				"between MBOX_REQ old size:%d and "
				"new request size:%d\n",
				(int)(job->request_len -
				      sizeof(struct fc_bsg_request)),
				(int)sizeof(struct dfc_mbox_req));
		mbox_req = (struct dfc_mbox_req *)
				job->request->rqst_data.h_vendor.vendor_cmd;
		mbox_req->extMboxTag = 0;
		mbox_req->extSeqNum = 0;
	}

	rc = lpfc_bsg_issue_mbox(phba, job, vport);
	if (rc == 0) {
		/* job done */
		job->reply->result = 0;
		job->dd_data = NULL;
		job->job_done(job);
	} else if (rc == 1)
		/* job submitted, will complete later */
		rc = 0; /* return zero, no error */
	else {
		/* some error occurred */
		job->reply->result = rc;
		job->dd_data = NULL;
	}

	return rc;
}

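/*
 * Editorial note: lpfc_bsg_issue_mbox() uses a three-way return
 * convention - 0 means the command completed inline (job finished here),
 * 1 means it was started and the mailbox completion handler will finish
 * the job, and a negative errno is passed back via job->reply->result.
 */
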
/**
 * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_menlo_cmd function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp, *cmp, *rmp;
	struct lpfc_bsg_menlo *menlo;
	unsigned long flags;
	struct menlo_response *menlo_resp;
	unsigned int rsp_size;
	int rc = 0;

	dd_data = cmdiocbq->context1;
	cmp = cmdiocbq->context2;
	bmp = cmdiocbq->context3;
	menlo = &dd_data->context_un.menlo;
	rmp = menlo->rmp;
	rsp = &rspiocbq->iocb;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Copy the job data or set the failing status for the job */

	if (job) {
		/* always return the xri, this would be used in the case
		 * of a menlo download to allow the data to be sent as a
		 * continuation of the exchange.
		 */
		menlo_resp = (struct menlo_response *)
			job->reply->reply_data.vendor_reply.vendor_rsp;
		menlo_resp->xri = rsp->ulpContext;
		if (rsp->ulpStatus) {
			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
				case IOERR_SEQUENCE_TIMEOUT:
					rc = -ETIMEDOUT;
					break;
				case IOERR_INVALID_RPI:
					rc = -EFAULT;
					break;
				default:
					rc = -EACCES;
					break;
				}
			} else {
				rc = -EACCES;
			}
		} else {
			rsp_size = rsp->un.genreq64.bdl.bdeSize;
			job->reply->reply_payload_rcv_len =
				lpfc_bsg_copy_data(rmp, &job->reply_payload,
						   rsp_size, 0);
		}
	}

	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_free_bsg_buffers(phba, cmp);
	lpfc_free_bsg_buffers(phba, rmp);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
	kfree(dd_data);

	/* Complete the job if active */

	if (job) {
		job->reply->result = rc;
		job->job_done(job);
	}

	return;
}

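/*
 * Editorial note: the IOSTAT_LOCAL_REJECT sub-status in ulpWord[4] is
 * folded into an errno for user space - IOERR_SEQUENCE_TIMEOUT becomes
 * -ETIMEDOUT, IOERR_INVALID_RPI becomes -EFAULT, and any other failure
 * (including non-LOCAL_REJECT status) is reported as -EACCES.
 */
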
/**
 * lpfc_menlo_cmd - send an ioctl for menlo hardware
 * @job: fc_bsg_job to handle
 *
 * This function issues a gen request 64 CR ioctl for all menlo cmd requests,
 * all the command completions will return the xri for the command.
 * For menlo data requests a gen request 64 CX is used to continue the exchange
 * supplied in the menlo request header xri field.
 **/
static int
lpfc_menlo_cmd(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocbq;
	IOCB_t *cmd;
	int rc = 0;
	struct menlo_command *menlo_cmd;
	struct menlo_response *menlo_resp;
	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
	int request_nseg;
	int reply_nseg;
	struct bsg_job_data *dd_data;
	struct ulp_bde64 *bpl = NULL;

	/* in case no data is returned return just the return code */
	job->reply->reply_payload_rcv_len = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) +
		sizeof(struct menlo_command)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2784 Received MENLO_CMD request below "
				"minimum size\n");
		rc = -ERANGE;
		goto no_dd_data;
	}

	if (job->reply_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2785 Received MENLO_CMD reply below "
				"minimum size\n");
		rc = -ERANGE;
		goto no_dd_data;
	}

	if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2786 Adapter does not support menlo "
				"commands\n");
		rc = -EPERM;
		goto no_dd_data;
	}

	menlo_cmd = (struct menlo_command *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	menlo_resp = (struct menlo_response *)
		job->reply->reply_data.vendor_reply.vendor_rsp;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2787 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto free_dd;
	}

	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_bmp;
	}

	INIT_LIST_HEAD(&bmp->list);

	bpl = (struct ulp_bde64 *)bmp->virt;
	request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
				     1, bpl, &request_nseg);
	if (!cmp) {
		rc = -ENOMEM;
		goto free_bmp;
	}
	lpfc_bsg_copy_data(cmp, &job->request_payload,
			   job->request_payload.payload_len, 1);

	bpl += request_nseg;
	reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
	rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
				     bpl, &reply_nseg);
	if (!rmp) {
		rc = -ENOMEM;
		goto free_cmp;
	}

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		goto free_rmp;
	}

	cmd = &cmdiocbq->iocb;
	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
	    (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
	cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
	cmd->ulpBdeCount = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpOwner = OWN_CHIP;
	cmd->ulpLe = 1; /* Limited Edition */
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->vport = phba->pport;
	/* We want the firmware to timeout before we do */
	cmd->ulpTimeout = MENLO_TIMEOUT - 5;
	cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context2 = cmp;
	cmdiocbq->context3 = bmp;
	if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
		cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
		cmd->ulpPU = MENLO_PU; /* 3 */
		cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
		cmd->ulpContext = MENLO_CONTEXT; /* 0 */
	} else {
		cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
		cmd->ulpPU = 1;
		cmd->un.ulpWord[4] = 0;
		cmd->ulpContext = menlo_cmd->xri;
	}

	dd_data->type = TYPE_MENLO;
	dd_data->set_job = job;
	dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
	dd_data->context_un.menlo.rmp = rmp;
	job->dd_data = dd_data;

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
				 MENLO_TIMEOUT - 5);
	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	lpfc_sli_release_iocbq(phba, cmdiocbq);

free_rmp:
	lpfc_free_bsg_buffers(phba, rmp);
free_cmp:
	lpfc_free_bsg_buffers(phba, cmp);
free_bmp:
	if (bmp->virt)
		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
free_dd:
	kfree(dd_data);
no_dd_data:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

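/*
 * Editorial note: LPFC_BSG_VENDOR_MENLO_CMD opens a new exchange
 * (CMD_GEN_REQUEST64_CR addressed to the fixed MENLO_DID), while
 * LPFC_BSG_VENDOR_MENLO_DATA continues an existing one
 * (CMD_GEN_REQUEST64_CX keyed by the xri a prior command returned).
 */
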
/**
 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
{
	int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
	int rc;

	switch (command) {
	case LPFC_BSG_VENDOR_SET_CT_EVENT:
		rc = lpfc_bsg_hba_set_event(job);
		break;
	case LPFC_BSG_VENDOR_GET_CT_EVENT:
		rc = lpfc_bsg_hba_get_event(job);
		break;
	case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
		rc = lpfc_bsg_send_mgmt_rsp(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_MODE:
		rc = lpfc_bsg_diag_loopback_mode(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_MODE_END:
		rc = lpfc_sli4_bsg_diag_mode_end(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
		rc = lpfc_bsg_diag_loopback_run(job);
		break;
	case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
		rc = lpfc_sli4_bsg_link_diag_test(job);
		break;
	case LPFC_BSG_VENDOR_GET_MGMT_REV:
		rc = lpfc_bsg_get_dfc_rev(job);
		break;
	case LPFC_BSG_VENDOR_MBOX:
		rc = lpfc_bsg_mbox_cmd(job);
		break;
	case LPFC_BSG_VENDOR_MENLO_CMD:
	case LPFC_BSG_VENDOR_MENLO_DATA:
		rc = lpfc_menlo_cmd(job);
		break;
	default:
		rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		job->reply->result = rc;
		break;
	}

	return rc;
}

/**
 * lpfc_bsg_request - handle a bsg request from the FC transport
 * @job: fc_bsg_job to handle
 **/
int
lpfc_bsg_request(struct fc_bsg_job *job)
{
	uint32_t msgcode;
	int rc;

	msgcode = job->request->msgcode;
	switch (msgcode) {
	case FC_BSG_HST_VENDOR:
		rc = lpfc_bsg_hst_vendor(job);
		break;
	case FC_BSG_RPT_ELS:
		rc = lpfc_bsg_rport_els(job);
		break;
	case FC_BSG_RPT_CT:
		rc = lpfc_bsg_send_mgmt_cmd(job);
		break;
	default:
		rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		job->reply->result = rc;
		break;
	}

	return rc;
}

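/*
 * Editorial sketch (assumed interface, not part of this file): user space
 * reaches lpfc_bsg_request() through the FC transport bsg node (e.g.
 * /dev/bsg/fc_host0) by wrapping an fc_bsg_request in an sg_io_v4 header.
 * Field usage below follows include/linux/bsg.h and scsi/scsi_bsg_fc.h,
 * but treat it as illustrative only:
 *
 *	struct fc_bsg_request req = { .msgcode = FC_BSG_HST_VENDOR };
 *	struct sg_io_v4 hdr = {
 *		.guard		= 'Q',
 *		.protocol	= BSG_PROTOCOL_SCSI,
 *		.subprotocol	= BSG_SUB_PROTOCOL_SCSI_TRANSPORT,
 *		.request	= (uintptr_t)&req,
 *		.request_len	= sizeof(req),
 *	};
 *	ioctl(bsg_fd, SG_IO, &hdr);
 */
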
/**
 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
 * @job: fc_bsg_job that has timed out
 *
 * This function just aborts the job's IOCB. The aborted IOCB will return to
 * the waiting function which will handle passing the error back to userspace
 **/
int
lpfc_bsg_timeout(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	struct bsg_job_data *dd_data;
	unsigned long flags;
	int rc = 0;
	LIST_HEAD(completions);
	struct lpfc_iocbq *check_iocb, *next_iocb;

	/* if job's driver data is NULL, the command completed or is in the
	 * process of completing. In this case, return status to the request
	 * so the timeout is retried. This avoids double completion issues
	 * and the request will be pulled off the timer queue when the
	 * command's completion handler executes. Otherwise, prevent the
	 * command's completion handler from executing the job done callback
	 * and continue processing to abort the outstanding command.
	 */

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = (struct bsg_job_data *)job->dd_data;
	if (dd_data) {
		dd_data->set_job = NULL;
		job->dd_data = NULL;
	} else {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return -EAGAIN;
	}

	switch (dd_data->type) {
	case TYPE_IOCB:
		/* Check to see if IOCB was issued to the port or not. If not,
		 * remove it from the txq queue and call cancel iocbs.
		 * Otherwise, call abort iotag
		 */
		cmdiocb = dd_data->context_un.iocb.cmdiocbq;
		spin_lock_irq(&phba->hbalock);
		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
					 list) {
			if (check_iocb == cmdiocb) {
				list_move_tail(&check_iocb->list,
					       &completions);
				break;
			}
		}
		if (list_empty(&completions))
			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
		spin_unlock_irq(&phba->hbalock);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		if (!list_empty(&completions)) {
			lpfc_sli_cancel_iocbs(phba, &completions,
					      IOSTAT_LOCAL_REJECT,
					      IOERR_SLI_ABORTED);
		}
		break;

	case TYPE_EVT:
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;

	case TYPE_MBOX:
		/* Update the ext buf ctx state if needed */

		if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
			phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;
	case TYPE_MENLO:
		/* Check to see if IOCB was issued to the port or not. If not,
		 * remove it from the txq queue and call cancel iocbs.
		 * Otherwise, call abort iotag.
		 */
		cmdiocb = dd_data->context_un.menlo.cmdiocbq;
		spin_lock_irq(&phba->hbalock);
		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
					 list) {
			if (check_iocb == cmdiocb) {
				list_move_tail(&check_iocb->list,
					       &completions);
				break;
			}
		}
		if (list_empty(&completions))
			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
		spin_unlock_irq(&phba->hbalock);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		if (!list_empty(&completions)) {
			lpfc_sli_cancel_iocbs(phba, &completions,
					      IOSTAT_LOCAL_REJECT,
					      IOERR_SLI_ABORTED);
		}
		break;
	default:
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;
	}

	/* scsi transport fc fc_bsg_job_timeout expects a zero return code,
	 * otherwise an error message will be displayed on the console
	 * so always return success (zero)
	 */
	return rc;
}