/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2013 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */
8 #include "qlcnic_sriov.h"
10 #include "qlcnic_83xx_hw.h"
11 #include <linux/types.h>
13 #define QLC_BC_COMMAND 0
14 #define QLC_BC_RESPONSE 1
16 #define QLC_MBOX_RESP_TIMEOUT (10 * HZ)
17 #define QLC_MBOX_CH_FREE_TIMEOUT (10 * HZ)
20 #define QLC_BC_CFREE 1
21 #define QLC_BC_HDR_SZ 16
22 #define QLC_BC_PAYLOAD_SZ (1024 - QLC_BC_HDR_SZ)
24 static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter
*,
25 struct qlcnic_cmd_args
*);
27 static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops
= {
28 .read_crb
= qlcnic_83xx_read_crb
,
29 .write_crb
= qlcnic_83xx_write_crb
,
30 .read_reg
= qlcnic_83xx_rd_reg_indirect
,
31 .write_reg
= qlcnic_83xx_wrt_reg_indirect
,
32 .get_mac_address
= qlcnic_83xx_get_mac_address
,
33 .setup_intr
= qlcnic_83xx_setup_intr
,
34 .alloc_mbx_args
= qlcnic_83xx_alloc_mbx_args
,
35 .mbx_cmd
= qlcnic_sriov_vf_mbx_op
,
36 .get_func_no
= qlcnic_83xx_get_func_no
,
37 .api_lock
= qlcnic_83xx_cam_lock
,
38 .api_unlock
= qlcnic_83xx_cam_unlock
,
39 .process_lb_rcv_ring_diag
= qlcnic_83xx_process_rcv_ring_diag
,
40 .create_rx_ctx
= qlcnic_83xx_create_rx_ctx
,
41 .create_tx_ctx
= qlcnic_83xx_create_tx_ctx
,
42 .setup_link_event
= qlcnic_83xx_setup_link_event
,
43 .get_nic_info
= qlcnic_83xx_get_nic_info
,
44 .get_pci_info
= qlcnic_83xx_get_pci_info
,
45 .set_nic_info
= qlcnic_83xx_set_nic_info
,
46 .change_macvlan
= qlcnic_83xx_sre_macaddr_change
,
47 .napi_enable
= qlcnic_83xx_napi_enable
,
48 .napi_disable
= qlcnic_83xx_napi_disable
,
49 .config_intr_coal
= qlcnic_83xx_config_intr_coal
,
50 .config_rss
= qlcnic_83xx_config_rss
,
51 .config_hw_lro
= qlcnic_83xx_config_hw_lro
,
52 .config_promisc_mode
= qlcnic_83xx_nic_set_promisc
,
53 .change_l2_filter
= qlcnic_83xx_change_l2_filter
,
54 .get_board_info
= qlcnic_83xx_get_port_info
,
57 static struct qlcnic_nic_template qlcnic_sriov_vf_ops
= {
58 .config_bridged_mode
= qlcnic_config_bridged_mode
,
59 .config_led
= qlcnic_config_led
,
60 .cancel_idc_work
= qlcnic_83xx_idc_exit
,
61 .napi_add
= qlcnic_83xx_napi_add
,
62 .napi_del
= qlcnic_83xx_napi_del
,
63 .config_ipaddr
= qlcnic_83xx_config_ipaddr
,
64 .clear_legacy_intr
= qlcnic_83xx_clear_legacy_intr
,
67 static const struct qlcnic_mailbox_metadata qlcnic_sriov_bc_mbx_tbl
[] = {
68 {QLCNIC_BC_CMD_CHANNEL_INIT
, 2, 2},
69 {QLCNIC_BC_CMD_CHANNEL_TERM
, 2, 2},
72 static inline bool qlcnic_sriov_bc_msg_check(u32 val
)
74 return (val
& (1 << QLC_BC_MSG
)) ? true : false;
77 static inline bool qlcnic_sriov_channel_free_check(u32 val
)
79 return (val
& (1 << QLC_BC_CFREE
)) ? true : false;
82 static inline u8
qlcnic_sriov_target_func_id(u32 val
)
84 return (val
>> 4) & 0xff;
87 static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter
*adapter
, int vf_id
)
89 struct pci_dev
*dev
= adapter
->pdev
;
93 if (qlcnic_sriov_vf_check(adapter
))
96 pos
= pci_find_ext_capability(dev
, PCI_EXT_CAP_ID_SRIOV
);
97 pci_read_config_word(dev
, pos
+ PCI_SRIOV_VF_OFFSET
, &offset
);
98 pci_read_config_word(dev
, pos
+ PCI_SRIOV_VF_STRIDE
, &stride
);
100 return (dev
->devfn
+ offset
+ stride
* vf_id
) & 0xff;
103 int qlcnic_sriov_init(struct qlcnic_adapter
*adapter
, int num_vfs
)
105 struct qlcnic_sriov
*sriov
;
106 struct qlcnic_back_channel
*bc
;
107 struct workqueue_struct
*wq
;
108 struct qlcnic_vport
*vp
;
109 struct qlcnic_vf_info
*vf
;
112 if (!qlcnic_sriov_enable_check(adapter
))
115 sriov
= kzalloc(sizeof(struct qlcnic_sriov
), GFP_KERNEL
);
119 adapter
->ahw
->sriov
= sriov
;
120 sriov
->num_vfs
= num_vfs
;
122 sriov
->vf_info
= kzalloc(sizeof(struct qlcnic_vf_info
) *
123 num_vfs
, GFP_KERNEL
);
124 if (!sriov
->vf_info
) {
126 goto qlcnic_free_sriov
;
129 wq
= create_singlethread_workqueue("bc-trans");
132 dev_err(&adapter
->pdev
->dev
,
133 "Cannot create bc-trans workqueue\n");
134 goto qlcnic_free_vf_info
;
137 bc
->bc_trans_wq
= wq
;
139 for (i
= 0; i
< num_vfs
; i
++) {
140 vf
= &sriov
->vf_info
[i
];
141 vf
->adapter
= adapter
;
142 vf
->pci_func
= qlcnic_sriov_virtid_fn(adapter
, i
);
143 mutex_init(&vf
->send_cmd_lock
);
144 INIT_LIST_HEAD(&vf
->rcv_act
.wait_list
);
145 INIT_LIST_HEAD(&vf
->rcv_pend
.wait_list
);
146 spin_lock_init(&vf
->rcv_act
.lock
);
147 spin_lock_init(&vf
->rcv_pend
.lock
);
148 init_completion(&vf
->ch_free_cmpl
);
150 if (qlcnic_sriov_pf_check(adapter
)) {
151 vp
= kzalloc(sizeof(struct qlcnic_vport
), GFP_KERNEL
);
154 goto qlcnic_destroy_trans_wq
;
156 sriov
->vf_info
[i
].vp
= vp
;
157 random_ether_addr(vp
->mac
);
158 dev_info(&adapter
->pdev
->dev
,
159 "MAC Address %pM is configured for VF %d\n",
166 qlcnic_destroy_trans_wq
:
167 destroy_workqueue(bc
->bc_trans_wq
);
170 kfree(sriov
->vf_info
);
173 kfree(adapter
->ahw
->sriov
);
177 void __qlcnic_sriov_cleanup(struct qlcnic_adapter
*adapter
)
179 struct qlcnic_sriov
*sriov
= adapter
->ahw
->sriov
;
180 struct qlcnic_back_channel
*bc
= &sriov
->bc
;
183 if (!qlcnic_sriov_enable_check(adapter
))
186 destroy_workqueue(bc
->bc_trans_wq
);
188 for (i
= 0; i
< sriov
->num_vfs
; i
++)
189 kfree(sriov
->vf_info
[i
].vp
);
191 kfree(sriov
->vf_info
);
192 kfree(adapter
->ahw
->sriov
);
195 static void qlcnic_sriov_vf_cleanup(struct qlcnic_adapter
*adapter
)
197 qlcnic_sriov_channel_cfg_cmd(adapter
, QLCNIC_BC_CMD_CHANNEL_TERM
);
198 qlcnic_sriov_cfg_bc_intr(adapter
, 0);
199 __qlcnic_sriov_cleanup(adapter
);
/* Dispatch SR-IOV teardown to the PF or VF specific path. */
void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
{
	if (qlcnic_sriov_pf_check(adapter))
		qlcnic_sriov_pf_cleanup(adapter);

	if (qlcnic_sriov_vf_check(adapter))
		qlcnic_sriov_vf_cleanup(adapter);
}
211 static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter
*adapter
, u32
*hdr
,
212 u32
*pay
, u8 pci_func
, u8 size
)
214 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
216 u32 rsp
, mbx_val
, fw_data
, rsp_num
, mbx_cmd
, val
;
221 opcode
= ((struct qlcnic_bc_hdr
*)hdr
)->cmd_op
;
223 if (!test_bit(QLC_83XX_MBX_READY
, &adapter
->ahw
->idc
.status
)) {
224 dev_info(&adapter
->pdev
->dev
,
225 "Mailbox cmd attempted, 0x%x\n", opcode
);
226 dev_info(&adapter
->pdev
->dev
, "Mailbox detached\n");
230 spin_lock_irqsave(&ahw
->mbx_lock
, flags
);
232 mbx_val
= QLCRDX(ahw
, QLCNIC_HOST_MBX_CTRL
);
234 QLCDB(adapter
, DRV
, "Mailbox cmd attempted, 0x%x\n", opcode
);
235 spin_unlock_irqrestore(&ahw
->mbx_lock
, flags
);
236 return QLCNIC_RCODE_TIMEOUT
;
238 /* Fill in mailbox registers */
239 val
= size
+ (sizeof(struct qlcnic_bc_hdr
) / sizeof(u32
));
240 mbx_cmd
= 0x31 | (val
<< 16) | (adapter
->ahw
->fw_hal_version
<< 29);
242 writel(mbx_cmd
, QLCNIC_MBX_HOST(ahw
, 0));
243 mbx_cmd
= 0x1 | (1 << 4);
245 if (qlcnic_sriov_pf_check(adapter
))
246 mbx_cmd
|= (pci_func
<< 5);
248 writel(mbx_cmd
, QLCNIC_MBX_HOST(ahw
, 1));
249 for (i
= 2, j
= 0; j
< (sizeof(struct qlcnic_bc_hdr
) / sizeof(u32
));
251 writel(*(hdr
++), QLCNIC_MBX_HOST(ahw
, i
));
253 for (j
= 0; j
< size
; j
++, i
++)
254 writel(*(pay
++), QLCNIC_MBX_HOST(ahw
, i
));
256 /* Signal FW about the impending command */
257 QLCWRX(ahw
, QLCNIC_HOST_MBX_CTRL
, QLCNIC_SET_OWNER
);
259 /* Waiting for the mailbox cmd to complete and while waiting here
260 * some AEN might arrive. If more than 5 seconds expire we can
261 * assume something is wrong.
264 rsp
= qlcnic_83xx_mbx_poll(adapter
);
265 if (rsp
!= QLCNIC_RCODE_TIMEOUT
) {
266 /* Get the FW response data */
267 fw_data
= readl(QLCNIC_MBX_FW(ahw
, 0));
268 if (fw_data
& QLCNIC_MBX_ASYNC_EVENT
) {
269 qlcnic_83xx_process_aen(adapter
);
270 mbx_val
= QLCRDX(ahw
, QLCNIC_HOST_MBX_CTRL
);
274 mbx_err_code
= QLCNIC_MBX_STATUS(fw_data
);
275 rsp_num
= QLCNIC_MBX_NUM_REGS(fw_data
);
276 opcode
= QLCNIC_MBX_RSP(fw_data
);
278 switch (mbx_err_code
) {
279 case QLCNIC_MBX_RSP_OK
:
280 case QLCNIC_MBX_PORT_RSP_OK
:
281 rsp
= QLCNIC_RCODE_SUCCESS
;
284 if (opcode
== QLCNIC_CMD_CONFIG_MAC_VLAN
) {
285 rsp
= qlcnic_83xx_mac_rcode(adapter
);
289 dev_err(&adapter
->pdev
->dev
,
290 "MBX command 0x%x failed with err:0x%x\n",
291 opcode
, mbx_err_code
);
298 dev_err(&adapter
->pdev
->dev
, "MBX command 0x%x timed out\n",
299 QLCNIC_MBX_RSP(mbx_cmd
));
300 rsp
= QLCNIC_RCODE_TIMEOUT
;
302 /* clear fw mbx control register */
303 QLCWRX(ahw
, QLCNIC_FW_MBX_CTRL
, QLCNIC_CLR_OWNER
);
304 spin_unlock_irqrestore(&adapter
->ahw
->mbx_lock
, flags
);
308 static int qlcnic_sriov_setup_vf(struct qlcnic_adapter
*adapter
,
313 if (!qlcnic_use_msi_x
&& !!qlcnic_use_msi
)
314 dev_warn(&adapter
->pdev
->dev
,
315 "83xx adapter do not support MSI interrupts\n");
317 err
= qlcnic_setup_intr(adapter
, 1);
319 dev_err(&adapter
->pdev
->dev
, "Failed to setup interrupt\n");
320 goto err_out_disable_msi
;
323 err
= qlcnic_83xx_setup_mbx_intr(adapter
);
325 goto err_out_disable_msi
;
327 err
= qlcnic_sriov_init(adapter
, 1);
329 goto err_out_disable_mbx_intr
;
331 err
= qlcnic_sriov_cfg_bc_intr(adapter
, 1);
333 goto err_out_cleanup_sriov
;
335 err
= qlcnic_sriov_channel_cfg_cmd(adapter
, QLCNIC_BC_CMD_CHANNEL_INIT
);
337 goto err_out_disable_bc_intr
;
339 err
= qlcnic_setup_netdev(adapter
, adapter
->netdev
, pci_using_dac
);
341 goto err_out_send_channel_term
;
343 pci_set_drvdata(adapter
->pdev
, adapter
);
344 dev_info(&adapter
->pdev
->dev
, "%s: XGbE port initialized\n",
345 adapter
->netdev
->name
);
348 err_out_send_channel_term
:
349 qlcnic_sriov_channel_cfg_cmd(adapter
, QLCNIC_BC_CMD_CHANNEL_TERM
);
351 err_out_disable_bc_intr
:
352 qlcnic_sriov_cfg_bc_intr(adapter
, 0);
354 err_out_cleanup_sriov
:
355 __qlcnic_sriov_cleanup(adapter
);
357 err_out_disable_mbx_intr
:
358 qlcnic_83xx_free_mbx_intr(adapter
);
361 qlcnic_teardown_intr(adapter
);
365 int qlcnic_sriov_vf_init(struct qlcnic_adapter
*adapter
, int pci_using_dac
)
367 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
369 spin_lock_init(&ahw
->mbx_lock
);
370 set_bit(QLC_83XX_MBX_READY
, &adapter
->ahw
->idc
.status
);
371 ahw
->msix_supported
= 1;
372 adapter
->flags
|= QLCNIC_TX_INTR_SHARED
;
374 if (qlcnic_sriov_setup_vf(adapter
, pci_using_dac
))
377 if (qlcnic_read_mac_addr(adapter
))
378 dev_warn(&adapter
->pdev
->dev
, "failed to read mac addr\n");
380 set_bit(QLC_83XX_MODULE_LOADED
, &adapter
->ahw
->idc
.status
);
381 adapter
->ahw
->idc
.delay
= QLC_83XX_IDC_FW_POLL_DELAY
;
382 adapter
->ahw
->reset_context
= 0;
383 adapter
->fw_fail_cnt
= 0;
384 clear_bit(__QLCNIC_RESETTING
, &adapter
->state
);
385 adapter
->need_fw_reset
= 0;
389 void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter
*adapter
)
391 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
393 ahw
->op_mode
= QLCNIC_SRIOV_VF_FUNC
;
394 dev_info(&adapter
->pdev
->dev
,
395 "HAL Version: %d Non Privileged SRIOV function\n",
396 ahw
->fw_hal_version
);
397 adapter
->nic_ops
= &qlcnic_sriov_vf_ops
;
398 set_bit(__QLCNIC_SRIOV_ENABLE
, &adapter
->state
);
402 void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context
*ahw
)
404 ahw
->hw_ops
= &qlcnic_sriov_vf_hw_ops
;
405 ahw
->reg_tbl
= (u32
*)qlcnic_83xx_reg_tbl
;
406 ahw
->ext_reg_tbl
= (u32
*)qlcnic_83xx_ext_reg_tbl
;
409 static u32
qlcnic_sriov_get_bc_paysize(u32 real_pay_size
, u8 curr_frag
)
413 pay_size
= real_pay_size
/ ((curr_frag
+ 1) * QLC_BC_PAYLOAD_SZ
);
416 pay_size
= QLC_BC_PAYLOAD_SZ
;
418 pay_size
= real_pay_size
% QLC_BC_PAYLOAD_SZ
;
423 int qlcnic_sriov_func_to_index(struct qlcnic_adapter
*adapter
, u8 pci_func
)
425 struct qlcnic_vf_info
*vf_info
= adapter
->ahw
->sriov
->vf_info
;
428 if (qlcnic_sriov_vf_check(adapter
))
431 for (i
= 0; i
< adapter
->ahw
->sriov
->num_vfs
; i
++) {
432 if (vf_info
[i
].pci_func
== pci_func
)
439 static inline int qlcnic_sriov_alloc_bc_trans(struct qlcnic_bc_trans
**trans
)
441 *trans
= kzalloc(sizeof(struct qlcnic_bc_trans
), GFP_ATOMIC
);
445 init_completion(&(*trans
)->resp_cmpl
);
449 static inline int qlcnic_sriov_alloc_bc_msg(struct qlcnic_bc_hdr
**hdr
,
452 *hdr
= kzalloc(sizeof(struct qlcnic_bc_hdr
) * size
, GFP_ATOMIC
);
459 static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args
*mbx
, u32 type
)
461 const struct qlcnic_mailbox_metadata
*mbx_tbl
;
464 mbx_tbl
= qlcnic_sriov_bc_mbx_tbl
;
465 size
= ARRAY_SIZE(qlcnic_sriov_bc_mbx_tbl
);
467 for (i
= 0; i
< size
; i
++) {
468 if (type
== mbx_tbl
[i
].cmd
) {
469 mbx
->op_type
= QLC_BC_CMD
;
470 mbx
->req
.num
= mbx_tbl
[i
].in_args
;
471 mbx
->rsp
.num
= mbx_tbl
[i
].out_args
;
472 mbx
->req
.arg
= kcalloc(mbx
->req
.num
, sizeof(u32
),
476 mbx
->rsp
.arg
= kcalloc(mbx
->rsp
.num
, sizeof(u32
),
483 memset(mbx
->req
.arg
, 0, sizeof(u32
) * mbx
->req
.num
);
484 memset(mbx
->rsp
.arg
, 0, sizeof(u32
) * mbx
->rsp
.num
);
485 mbx
->req
.arg
[0] = (type
| (mbx
->req
.num
<< 16) |
493 static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans
*trans
,
494 struct qlcnic_cmd_args
*cmd
,
495 u16 seq
, u8 msg_type
)
497 struct qlcnic_bc_hdr
*hdr
;
499 u32 num_regs
, bc_pay_sz
;
501 u8 cmd_op
, num_frags
, t_num_frags
;
503 bc_pay_sz
= QLC_BC_PAYLOAD_SZ
;
504 if (msg_type
== QLC_BC_COMMAND
) {
505 trans
->req_pay
= (struct qlcnic_bc_payload
*)cmd
->req
.arg
;
506 trans
->rsp_pay
= (struct qlcnic_bc_payload
*)cmd
->rsp
.arg
;
507 num_regs
= cmd
->req
.num
;
508 trans
->req_pay_size
= (num_regs
* 4);
509 num_regs
= cmd
->rsp
.num
;
510 trans
->rsp_pay_size
= (num_regs
* 4);
511 cmd_op
= cmd
->req
.arg
[0] & 0xff;
512 remainder
= (trans
->req_pay_size
) % (bc_pay_sz
);
513 num_frags
= (trans
->req_pay_size
) / (bc_pay_sz
);
516 t_num_frags
= num_frags
;
517 if (qlcnic_sriov_alloc_bc_msg(&trans
->req_hdr
, num_frags
))
519 remainder
= (trans
->rsp_pay_size
) % (bc_pay_sz
);
520 num_frags
= (trans
->rsp_pay_size
) / (bc_pay_sz
);
523 if (qlcnic_sriov_alloc_bc_msg(&trans
->rsp_hdr
, num_frags
))
525 num_frags
= t_num_frags
;
526 hdr
= trans
->req_hdr
;
528 cmd
->req
.arg
= (u32
*)trans
->req_pay
;
529 cmd
->rsp
.arg
= (u32
*)trans
->rsp_pay
;
530 cmd_op
= cmd
->req
.arg
[0] & 0xff;
531 remainder
= (trans
->rsp_pay_size
) % (bc_pay_sz
);
532 num_frags
= (trans
->rsp_pay_size
) / (bc_pay_sz
);
535 cmd
->req
.num
= trans
->req_pay_size
/ 4;
536 cmd
->rsp
.num
= trans
->rsp_pay_size
/ 4;
537 hdr
= trans
->rsp_hdr
;
540 trans
->trans_id
= seq
;
541 trans
->cmd_id
= cmd_op
;
542 for (i
= 0; i
< num_frags
; i
++) {
544 hdr
[i
].msg_type
= msg_type
;
545 hdr
[i
].op_type
= cmd
->op_type
;
547 hdr
[i
].num_frags
= num_frags
;
548 hdr
[i
].frag_num
= i
+ 1;
549 hdr
[i
].cmd_op
= cmd_op
;
555 static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans
*trans
)
559 kfree(trans
->req_hdr
);
560 kfree(trans
->rsp_hdr
);
564 static int qlcnic_sriov_clear_trans(struct qlcnic_vf_info
*vf
,
565 struct qlcnic_bc_trans
*trans
, u8 type
)
567 struct qlcnic_trans_list
*t_list
;
571 if (type
== QLC_BC_RESPONSE
) {
572 t_list
= &vf
->rcv_act
;
573 spin_lock_irqsave(&t_list
->lock
, flags
);
575 list_del(&trans
->list
);
576 if (t_list
->count
> 0)
578 spin_unlock_irqrestore(&t_list
->lock
, flags
);
580 if (type
== QLC_BC_COMMAND
) {
581 while (test_and_set_bit(QLC_BC_VF_SEND
, &vf
->state
))
584 clear_bit(QLC_BC_VF_SEND
, &vf
->state
);
589 static void qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov
*sriov
,
590 struct qlcnic_vf_info
*vf
,
593 INIT_WORK(&vf
->trans_work
, func
);
594 queue_work(sriov
->bc
.bc_trans_wq
, &vf
->trans_work
);
597 static inline void qlcnic_sriov_wait_for_resp(struct qlcnic_bc_trans
*trans
)
599 struct completion
*cmpl
= &trans
->resp_cmpl
;
601 if (wait_for_completion_timeout(cmpl
, QLC_MBOX_RESP_TIMEOUT
))
602 trans
->trans_state
= QLC_END
;
604 trans
->trans_state
= QLC_ABORT
;
609 static void qlcnic_sriov_handle_multi_frags(struct qlcnic_bc_trans
*trans
,
612 if (type
== QLC_BC_RESPONSE
) {
613 trans
->curr_rsp_frag
++;
614 if (trans
->curr_rsp_frag
< trans
->rsp_hdr
->num_frags
)
615 trans
->trans_state
= QLC_INIT
;
617 trans
->trans_state
= QLC_END
;
619 trans
->curr_req_frag
++;
620 if (trans
->curr_req_frag
< trans
->req_hdr
->num_frags
)
621 trans
->trans_state
= QLC_INIT
;
623 trans
->trans_state
= QLC_WAIT_FOR_RESP
;
627 static void qlcnic_sriov_wait_for_channel_free(struct qlcnic_bc_trans
*trans
,
630 struct qlcnic_vf_info
*vf
= trans
->vf
;
631 struct completion
*cmpl
= &vf
->ch_free_cmpl
;
633 if (!wait_for_completion_timeout(cmpl
, QLC_MBOX_CH_FREE_TIMEOUT
)) {
634 trans
->trans_state
= QLC_ABORT
;
638 clear_bit(QLC_BC_VF_CHANNEL
, &vf
->state
);
639 qlcnic_sriov_handle_multi_frags(trans
, type
);
642 static void qlcnic_sriov_pull_bc_msg(struct qlcnic_adapter
*adapter
,
643 u32
*hdr
, u32
*pay
, u32 size
)
645 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
647 u8 i
, max
= 2, hdr_size
, j
;
649 hdr_size
= (sizeof(struct qlcnic_bc_hdr
) / sizeof(u32
));
650 max
= (size
/ sizeof(u32
)) + hdr_size
;
652 fw_mbx
= readl(QLCNIC_MBX_FW(ahw
, 0));
653 for (i
= 2, j
= 0; j
< hdr_size
; i
++, j
++)
654 *(hdr
++) = readl(QLCNIC_MBX_FW(ahw
, i
));
655 for (; j
< max
; i
++, j
++)
656 *(pay
++) = readl(QLCNIC_MBX_FW(ahw
, i
));
659 static int __qlcnic_sriov_issue_bc_post(struct qlcnic_vf_info
*vf
)
665 if (!test_and_set_bit(QLC_BC_VF_CHANNEL
, &vf
->state
)) {
675 static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans
*trans
, u8 type
)
677 struct qlcnic_vf_info
*vf
= trans
->vf
;
678 u32 pay_size
, hdr_size
;
681 u8 pci_func
= trans
->func_id
;
683 if (__qlcnic_sriov_issue_bc_post(vf
))
686 if (type
== QLC_BC_COMMAND
) {
687 hdr
= (u32
*)(trans
->req_hdr
+ trans
->curr_req_frag
);
688 pay
= (u32
*)(trans
->req_pay
+ trans
->curr_req_frag
);
689 hdr_size
= (sizeof(struct qlcnic_bc_hdr
) / sizeof(u32
));
690 pay_size
= qlcnic_sriov_get_bc_paysize(trans
->req_pay_size
,
691 trans
->curr_req_frag
);
692 pay_size
= (pay_size
/ sizeof(u32
));
694 hdr
= (u32
*)(trans
->rsp_hdr
+ trans
->curr_rsp_frag
);
695 pay
= (u32
*)(trans
->rsp_pay
+ trans
->curr_rsp_frag
);
696 hdr_size
= (sizeof(struct qlcnic_bc_hdr
) / sizeof(u32
));
697 pay_size
= qlcnic_sriov_get_bc_paysize(trans
->rsp_pay_size
,
698 trans
->curr_rsp_frag
);
699 pay_size
= (pay_size
/ sizeof(u32
));
702 ret
= qlcnic_sriov_post_bc_msg(vf
->adapter
, hdr
, pay
,
707 static int __qlcnic_sriov_send_bc_msg(struct qlcnic_bc_trans
*trans
,
708 struct qlcnic_vf_info
*vf
, u8 type
)
714 switch (trans
->trans_state
) {
716 trans
->trans_state
= QLC_WAIT_FOR_CHANNEL_FREE
;
717 if (qlcnic_sriov_issue_bc_post(trans
, type
))
718 trans
->trans_state
= QLC_ABORT
;
720 case QLC_WAIT_FOR_CHANNEL_FREE
:
721 qlcnic_sriov_wait_for_channel_free(trans
, type
);
723 case QLC_WAIT_FOR_RESP
:
724 qlcnic_sriov_wait_for_resp(trans
);
733 clear_bit(QLC_BC_VF_CHANNEL
, &vf
->state
);
743 static int qlcnic_sriov_send_bc_cmd(struct qlcnic_adapter
*adapter
,
744 struct qlcnic_bc_trans
*trans
, int pci_func
)
746 struct qlcnic_vf_info
*vf
;
747 int err
, index
= qlcnic_sriov_func_to_index(adapter
, pci_func
);
752 vf
= &adapter
->ahw
->sriov
->vf_info
[index
];
754 trans
->func_id
= pci_func
;
756 if (!test_bit(QLC_BC_VF_STATE
, &vf
->state
)) {
757 if (qlcnic_sriov_pf_check(adapter
))
759 if (qlcnic_sriov_vf_check(adapter
) &&
760 trans
->cmd_id
!= QLCNIC_BC_CMD_CHANNEL_INIT
)
764 mutex_lock(&vf
->send_cmd_lock
);
765 vf
->send_cmd
= trans
;
766 err
= __qlcnic_sriov_send_bc_msg(trans
, vf
, QLC_BC_COMMAND
);
767 qlcnic_sriov_clear_trans(vf
, trans
, QLC_BC_COMMAND
);
768 mutex_unlock(&vf
->send_cmd_lock
);
772 static void __qlcnic_sriov_process_bc_cmd(struct qlcnic_adapter
*adapter
,
773 struct qlcnic_bc_trans
*trans
,
774 struct qlcnic_cmd_args
*cmd
)
776 #ifdef CONFIG_QLCNIC_SRIOV
777 if (qlcnic_sriov_pf_check(adapter
)) {
778 qlcnic_sriov_pf_process_bc_cmd(adapter
, trans
, cmd
);
782 cmd
->rsp
.arg
[0] |= (0x9 << 25);
786 static void qlcnic_sriov_process_bc_cmd(struct work_struct
*work
)
788 struct qlcnic_vf_info
*vf
= container_of(work
, struct qlcnic_vf_info
,
790 struct qlcnic_bc_trans
*trans
= NULL
;
791 struct qlcnic_adapter
*adapter
= vf
->adapter
;
792 struct qlcnic_cmd_args cmd
;
795 trans
= list_first_entry(&vf
->rcv_act
.wait_list
,
796 struct qlcnic_bc_trans
, list
);
797 adapter
= vf
->adapter
;
799 if (qlcnic_sriov_prepare_bc_hdr(trans
, &cmd
, trans
->req_hdr
->seq_id
,
803 __qlcnic_sriov_process_bc_cmd(adapter
, trans
, &cmd
);
804 trans
->trans_state
= QLC_INIT
;
805 __qlcnic_sriov_send_bc_msg(trans
, vf
, QLC_BC_RESPONSE
);
808 qlcnic_free_mbx_args(&cmd
);
809 req
= qlcnic_sriov_clear_trans(vf
, trans
, QLC_BC_RESPONSE
);
810 qlcnic_sriov_cleanup_transaction(trans
);
812 qlcnic_sriov_schedule_bc_cmd(adapter
->ahw
->sriov
, vf
,
813 qlcnic_sriov_process_bc_cmd
);
816 static void qlcnic_sriov_handle_bc_resp(struct qlcnic_bc_hdr
*hdr
,
817 struct qlcnic_vf_info
*vf
)
819 struct qlcnic_bc_trans
*trans
;
822 if (test_and_set_bit(QLC_BC_VF_SEND
, &vf
->state
))
825 trans
= vf
->send_cmd
;
830 if (trans
->trans_id
!= hdr
->seq_id
)
833 pay_size
= qlcnic_sriov_get_bc_paysize(trans
->rsp_pay_size
,
834 trans
->curr_rsp_frag
);
835 qlcnic_sriov_pull_bc_msg(vf
->adapter
,
836 (u32
*)(trans
->rsp_hdr
+ trans
->curr_rsp_frag
),
837 (u32
*)(trans
->rsp_pay
+ trans
->curr_rsp_frag
),
839 if (++trans
->curr_rsp_frag
< trans
->rsp_hdr
->num_frags
)
842 complete(&trans
->resp_cmpl
);
845 clear_bit(QLC_BC_VF_SEND
, &vf
->state
);
848 static int qlcnic_sriov_add_act_list(struct qlcnic_sriov
*sriov
,
849 struct qlcnic_vf_info
*vf
,
850 struct qlcnic_bc_trans
*trans
)
852 struct qlcnic_trans_list
*t_list
= &vf
->rcv_act
;
854 spin_lock(&t_list
->lock
);
856 list_add_tail(&trans
->list
, &t_list
->wait_list
);
857 if (t_list
->count
== 1)
858 qlcnic_sriov_schedule_bc_cmd(sriov
, vf
,
859 qlcnic_sriov_process_bc_cmd
);
860 spin_unlock(&t_list
->lock
);
864 static void qlcnic_sriov_handle_pending_trans(struct qlcnic_sriov
*sriov
,
865 struct qlcnic_vf_info
*vf
,
866 struct qlcnic_bc_hdr
*hdr
)
868 struct qlcnic_bc_trans
*trans
= NULL
;
869 struct list_head
*node
;
870 u32 pay_size
, curr_frag
;
871 u8 found
= 0, active
= 0;
873 spin_lock(&vf
->rcv_pend
.lock
);
874 if (vf
->rcv_pend
.count
> 0) {
875 list_for_each(node
, &vf
->rcv_pend
.wait_list
) {
876 trans
= list_entry(node
, struct qlcnic_bc_trans
, list
);
877 if (trans
->trans_id
== hdr
->seq_id
) {
885 curr_frag
= trans
->curr_req_frag
;
886 pay_size
= qlcnic_sriov_get_bc_paysize(trans
->req_pay_size
,
888 qlcnic_sriov_pull_bc_msg(vf
->adapter
,
889 (u32
*)(trans
->req_hdr
+ curr_frag
),
890 (u32
*)(trans
->req_pay
+ curr_frag
),
892 trans
->curr_req_frag
++;
893 if (trans
->curr_req_frag
>= hdr
->num_frags
) {
894 vf
->rcv_pend
.count
--;
895 list_del(&trans
->list
);
899 spin_unlock(&vf
->rcv_pend
.lock
);
902 if (qlcnic_sriov_add_act_list(sriov
, vf
, trans
))
903 qlcnic_sriov_cleanup_transaction(trans
);
908 static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov
*sriov
,
909 struct qlcnic_bc_hdr
*hdr
,
910 struct qlcnic_vf_info
*vf
)
912 struct qlcnic_bc_trans
*trans
;
913 struct qlcnic_adapter
*adapter
= vf
->adapter
;
914 struct qlcnic_cmd_args cmd
;
919 if (!test_bit(QLC_BC_VF_STATE
, &vf
->state
) &&
920 hdr
->op_type
!= QLC_BC_CMD
&&
921 hdr
->cmd_op
!= QLCNIC_BC_CMD_CHANNEL_INIT
)
924 if (hdr
->frag_num
> 1) {
925 qlcnic_sriov_handle_pending_trans(sriov
, vf
, hdr
);
929 cmd_op
= hdr
->cmd_op
;
930 if (qlcnic_sriov_alloc_bc_trans(&trans
))
933 if (hdr
->op_type
== QLC_BC_CMD
)
934 err
= qlcnic_sriov_alloc_bc_mbx_args(&cmd
, cmd_op
);
936 err
= qlcnic_alloc_mbx_args(&cmd
, adapter
, cmd_op
);
939 qlcnic_sriov_cleanup_transaction(trans
);
943 cmd
.op_type
= hdr
->op_type
;
944 if (qlcnic_sriov_prepare_bc_hdr(trans
, &cmd
, hdr
->seq_id
,
946 qlcnic_free_mbx_args(&cmd
);
947 qlcnic_sriov_cleanup_transaction(trans
);
951 pay_size
= qlcnic_sriov_get_bc_paysize(trans
->req_pay_size
,
952 trans
->curr_req_frag
);
953 qlcnic_sriov_pull_bc_msg(vf
->adapter
,
954 (u32
*)(trans
->req_hdr
+ trans
->curr_req_frag
),
955 (u32
*)(trans
->req_pay
+ trans
->curr_req_frag
),
957 trans
->func_id
= vf
->pci_func
;
959 trans
->trans_id
= hdr
->seq_id
;
960 trans
->curr_req_frag
++;
961 if (trans
->curr_req_frag
== trans
->req_hdr
->num_frags
) {
962 if (qlcnic_sriov_add_act_list(sriov
, vf
, trans
)) {
963 qlcnic_free_mbx_args(&cmd
);
964 qlcnic_sriov_cleanup_transaction(trans
);
967 spin_lock(&vf
->rcv_pend
.lock
);
968 list_add_tail(&trans
->list
, &vf
->rcv_pend
.wait_list
);
969 vf
->rcv_pend
.count
++;
970 spin_unlock(&vf
->rcv_pend
.lock
);
974 static void qlcnic_sriov_handle_msg_event(struct qlcnic_sriov
*sriov
,
975 struct qlcnic_vf_info
*vf
)
977 struct qlcnic_bc_hdr hdr
;
978 u32
*ptr
= (u32
*)&hdr
;
981 for (i
= 2; i
< 6; i
++)
982 ptr
[i
- 2] = readl(QLCNIC_MBX_FW(vf
->adapter
->ahw
, i
));
983 msg_type
= hdr
.msg_type
;
987 qlcnic_sriov_handle_bc_cmd(sriov
, &hdr
, vf
);
989 case QLC_BC_RESPONSE
:
990 qlcnic_sriov_handle_bc_resp(&hdr
, vf
);
995 void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter
*adapter
, u32 event
)
997 struct qlcnic_vf_info
*vf
;
998 struct qlcnic_sriov
*sriov
;
1002 sriov
= adapter
->ahw
->sriov
;
1003 pci_func
= qlcnic_sriov_target_func_id(event
);
1004 index
= qlcnic_sriov_func_to_index(adapter
, pci_func
);
1009 vf
= &sriov
->vf_info
[index
];
1010 vf
->pci_func
= pci_func
;
1012 if (qlcnic_sriov_channel_free_check(event
))
1013 complete(&vf
->ch_free_cmpl
);
1015 if (qlcnic_sriov_bc_msg_check(event
))
1016 qlcnic_sriov_handle_msg_event(sriov
, vf
);
1019 int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter
*adapter
, u8 enable
)
1021 struct qlcnic_cmd_args cmd
;
1024 if (!test_bit(__QLCNIC_SRIOV_ENABLE
, &adapter
->state
))
1027 if (qlcnic_alloc_mbx_args(&cmd
, adapter
, QLCNIC_CMD_BC_EVENT_SETUP
))
1031 cmd
.req
.arg
[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);
1033 err
= qlcnic_83xx_mbx_op(adapter
, &cmd
);
1035 if (err
!= QLCNIC_RCODE_SUCCESS
) {
1036 dev_err(&adapter
->pdev
->dev
,
1037 "Failed to %s bc events, err=%d\n",
1038 (enable
? "enable" : "disable"), err
);
1041 qlcnic_free_mbx_args(&cmd
);
1045 static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter
*adapter
,
1046 struct qlcnic_cmd_args
*cmd
)
1048 struct qlcnic_bc_trans
*trans
;
1050 u32 rsp_data
, opcode
, mbx_err_code
, rsp
;
1051 u16 seq
= ++adapter
->ahw
->sriov
->bc
.trans_counter
;
1053 if (qlcnic_sriov_alloc_bc_trans(&trans
))
1056 if (qlcnic_sriov_prepare_bc_hdr(trans
, cmd
, seq
, QLC_BC_COMMAND
))
1059 if (!test_bit(QLC_83XX_MBX_READY
, &adapter
->ahw
->idc
.status
)) {
1061 QLCDB(adapter
, DRV
, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n",
1062 QLCNIC_MBX_RSP(cmd
->req
.arg
[0]), adapter
->ahw
->pci_func
);
1066 err
= qlcnic_sriov_send_bc_cmd(adapter
, trans
, adapter
->ahw
->pci_func
);
1068 dev_err(&adapter
->pdev
->dev
,
1069 "MBX command 0x%x timed out for VF %d\n",
1070 (cmd
->req
.arg
[0] & 0xffff), adapter
->ahw
->pci_func
);
1071 rsp
= QLCNIC_RCODE_TIMEOUT
;
1075 rsp_data
= cmd
->rsp
.arg
[0];
1076 mbx_err_code
= QLCNIC_MBX_STATUS(rsp_data
);
1077 opcode
= QLCNIC_MBX_RSP(cmd
->req
.arg
[0]);
1079 if ((mbx_err_code
== QLCNIC_MBX_RSP_OK
) ||
1080 (mbx_err_code
== QLCNIC_MBX_PORT_RSP_OK
)) {
1081 rsp
= QLCNIC_RCODE_SUCCESS
;
1086 dev_err(&adapter
->pdev
->dev
,
1087 "MBX command 0x%x failed with err:0x%x for VF %d\n",
1088 opcode
, mbx_err_code
, adapter
->ahw
->pci_func
);
1092 qlcnic_sriov_cleanup_transaction(trans
);
1096 int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter
*adapter
, u8 cmd_op
)
1098 struct qlcnic_cmd_args cmd
;
1099 struct qlcnic_vf_info
*vf
= &adapter
->ahw
->sriov
->vf_info
[0];
1102 if (qlcnic_sriov_alloc_bc_mbx_args(&cmd
, cmd_op
))
1105 ret
= qlcnic_issue_cmd(adapter
, &cmd
);
1107 dev_err(&adapter
->pdev
->dev
,
1108 "Failed bc channel %s %d\n", cmd_op
? "term" : "init",
1113 cmd_op
= (cmd
.rsp
.arg
[0] & 0xff);
1114 if (cmd
.rsp
.arg
[0] >> 25 == 2)
1116 if (cmd_op
== QLCNIC_BC_CMD_CHANNEL_INIT
)
1117 set_bit(QLC_BC_VF_STATE
, &vf
->state
);
1119 clear_bit(QLC_BC_VF_STATE
, &vf
->state
);
1122 qlcnic_free_mbx_args(&cmd
);