/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2010-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "io.h"
#include "mcdi.h"
#include "filter.h"
#include "mcdi_pcol.h"
#include "regs.h"
#include "vfdi.h"
/* Number of longs required to track all the VIs in a VF */
#define VI_MASK_LENGTH BITS_TO_LONGS(1 << EFX_VI_SCALE_MAX)
/**
 * enum efx_vf_tx_filter_mode - TX MAC filtering behaviour
 * @VF_TX_FILTER_OFF: Disabled
 * @VF_TX_FILTER_AUTO: Enabled if MAC address assigned to VF and only
 *	2 TX queues allowed per VF.
 * @VF_TX_FILTER_ON: Enabled
 */
enum efx_vf_tx_filter_mode {
	VF_TX_FILTER_OFF,
	VF_TX_FILTER_AUTO,
	VF_TX_FILTER_ON,
};
/**
 * struct efx_vf - Back-end resource and protocol state for a PCI VF
 * @efx: The Efx NIC owning this VF
 * @pci_rid: The PCI requester ID for this VF
 * @pci_name: The PCI name (formatted address) of this VF
 * @index: Index of VF within its port and PF.
 * @req: VFDI incoming request work item. Incoming USR_EV events are received
 *	by the NAPI handler, but must be handled by executing MCDI requests
 *	inside a work item.
 * @req_addr: VFDI incoming request DMA address (in VF's PCI address space).
 * @req_type: Expected next incoming (from VF) %VFDI_EV_TYPE member.
 * @req_seqno: Expected next incoming (from VF) %VFDI_EV_SEQ member.
 * @msg_seqno: Next %VFDI_EV_SEQ member to reply to VF. Protected by
 *	@status_lock
 * @busy: VFDI request queued to be processed or being processed. Receiving
 *	a VFDI request when @busy is set is an error condition.
 * @buf: Incoming VFDI requests are DMAed from the VF into this buffer.
 * @buftbl_base: Buffer table entries for this VF start at this index.
 * @rx_filtering: Receive filtering has been requested by the VF driver.
 * @rx_filter_flags: The flags sent in the %VFDI_OP_INSERT_FILTER request.
 * @rx_filter_qid: VF relative qid for RX filter requested by VF.
 * @rx_filter_id: Receive MAC filter ID. Only one filter per VF is supported.
 * @tx_filter_mode: Transmit MAC filtering mode.
 * @tx_filter_id: Transmit MAC filter ID.
 * @addr: The MAC address and outer vlan tag of the VF.
 * @status_addr: VF DMA address of page for &struct vfdi_status updates.
 * @status_lock: Mutex protecting @msg_seqno, @status_addr, @addr,
 *	@peer_page_addrs and @peer_page_count from simultaneous
 *	updates by the VM and consumption by
 *	efx_sriov_update_vf_addr()
 * @peer_page_addrs: Pointer to an array of guest pages for local addresses.
 * @peer_page_count: Number of entries in @peer_page_addrs.
 * @evq0_addrs: Array of guest pages backing evq0.
 * @evq0_count: Number of entries in @evq0_addrs.
 * @flush_waitq: wait queue used by %VFDI_OP_FINI_ALL_QUEUES handler
 *	to wait for flush completions.
 * @txq_lock: Mutex for TX queue allocation.
 * @txq_mask: Mask of initialized transmit queues.
 * @txq_count: Number of initialized transmit queues.
 * @rxq_mask: Mask of initialized receive queues.
 * @rxq_count: Number of initialized receive queues.
 * @rxq_retry_mask: Mask of receive queues that need to be flushed again
 *	due to flush failure.
 * @rxq_retry_count: Number of receive queues in @rxq_retry_mask.
 * @reset_work: Work item to schedule a VF reset.
 */
struct efx_vf {
	struct efx_nic *efx;
	unsigned int pci_rid;
	char pci_name[13]; /* dddd:bb:dd.f */
	unsigned int index;
	struct work_struct req;
	u64 req_addr;
	int req_type;
	unsigned req_seqno;
	unsigned msg_seqno;
	bool busy;
	struct efx_buffer buf;
	unsigned buftbl_base;
	bool rx_filtering;
	enum efx_filter_flags rx_filter_flags;
	unsigned rx_filter_qid;
	int rx_filter_id;
	enum efx_vf_tx_filter_mode tx_filter_mode;
	int tx_filter_id;
	struct vfdi_endpoint addr;
	u64 status_addr;
	struct mutex status_lock;
	u64 *peer_page_addrs;
	unsigned peer_page_count;
	u64 evq0_addrs[EFX_MAX_VF_EVQ_SIZE * sizeof(efx_qword_t) /
		       EFX_BUF_SIZE];
	unsigned evq0_count;
	wait_queue_head_t flush_waitq;
	struct mutex txq_lock;
	unsigned long txq_mask[VI_MASK_LENGTH];
	unsigned txq_count;
	unsigned long rxq_mask[VI_MASK_LENGTH];
	unsigned rxq_count;
	unsigned long rxq_retry_mask[VI_MASK_LENGTH];
	atomic_t rxq_retry_count;
	struct work_struct reset_work;
};

struct efx_memcpy_req {
	unsigned int from_rid;
	void *from_buf;
	u64 from_addr;
	unsigned int to_rid;
	u64 to_addr;
	unsigned length;
};
/**
 * struct efx_local_addr - A MAC address on the vswitch without a VF.
 *
 * Siena does not have a switch, so VFs can't transmit data to each
 * other. Instead the VFs must be made aware of the local addresses
 * on the vswitch, so that they can arrange for an alternative
 * software datapath to be used.
 *
 * @link: List head for insertion into efx->local_addr_list.
 * @addr: Ethernet address
 */
struct efx_local_addr {
	struct list_head link;
	u8 addr[ETH_ALEN];
};

/**
 * struct efx_endpoint_page - Page of vfdi_endpoint structures
 *
 * @link: List head for insertion into efx->local_page_list.
 * @ptr: Pointer to page.
 * @addr: DMA address of page.
 */
struct efx_endpoint_page {
	struct list_head link;
	void *ptr;
	dma_addr_t addr;
};
/* Buffer table entries are reserved txq0,rxq0,evq0,txq1,rxq1,evq1 */
#define EFX_BUFTBL_TXQ_BASE(_vf, _qid)					\
	((_vf)->buftbl_base + EFX_VF_BUFTBL_PER_VI * (_qid))
#define EFX_BUFTBL_RXQ_BASE(_vf, _qid)					\
	(EFX_BUFTBL_TXQ_BASE(_vf, _qid) +				\
	 (EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE))
#define EFX_BUFTBL_EVQ_BASE(_vf, _qid)					\
	(EFX_BUFTBL_TXQ_BASE(_vf, _qid) +				\
	 (2 * EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE))

#define EFX_FIELD_MASK(_field)			\
	((1 << _field ## _WIDTH) - 1)
/* VFs can only use this many transmit channels */
static unsigned int vf_max_tx_channels = 2;
module_param(vf_max_tx_channels, uint, 0444);
MODULE_PARM_DESC(vf_max_tx_channels,
		 "Limit the number of TX channels VFs can use");

static int max_vfs = -1;
module_param(max_vfs, int, 0444);
MODULE_PARM_DESC(max_vfs,
		 "Reduce the number of VFs initialized by the driver");
/* Workqueue used by VFDI communication. We can't use the global
 * workqueue because it may be running the VF driver's probe()
 * routine, which will be blocked there waiting for a VFDI response.
 */
static struct workqueue_struct *vfdi_workqueue;
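/* Each VF owns a contiguous block of efx_vf_size(efx) VIs starting at
 * EFX_VI_BASE; abs_index() maps a VF-relative VI index to the absolute
 * per-port index. For example, with efx_vf_size() == 4, VI 1 of VF 2
 * is absolute VI EFX_VI_BASE + 2 * 4 + 1.
 */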
static unsigned abs_index(struct efx_vf *vf, unsigned index)
{
	return EFX_VI_BASE + vf->index * efx_vf_size(vf->efx) + index;
}
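/* Enable or disable SR-IOV in the firmware via MC_CMD_SRIOV, optionally
 * reporting back the VI scale and total VF count supported.
 */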
static int efx_sriov_cmd(struct efx_nic *efx, bool enable,
			 unsigned *vi_scale_out, unsigned *vf_total_out)
{
	u8 inbuf[MC_CMD_SRIOV_IN_LEN];
	u8 outbuf[MC_CMD_SRIOV_OUT_LEN];
	unsigned vi_scale, vf_total;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, SRIOV_IN_ENABLE, enable ? 1 : 0);
	MCDI_SET_DWORD(inbuf, SRIOV_IN_VI_BASE, EFX_VI_BASE);
	MCDI_SET_DWORD(inbuf, SRIOV_IN_VF_COUNT, efx->vf_count);

	rc = efx_mcdi_rpc(efx, MC_CMD_SRIOV, inbuf, MC_CMD_SRIOV_IN_LEN,
			  outbuf, MC_CMD_SRIOV_OUT_LEN, &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_SRIOV_OUT_LEN)
		return -EIO;

	vf_total = MCDI_DWORD(outbuf, SRIOV_OUT_VF_TOTAL);
	vi_scale = MCDI_DWORD(outbuf, SRIOV_OUT_VI_SCALE);
	if (vi_scale > EFX_VI_SCALE_MAX)
		return -EOPNOTSUPP;

	if (vi_scale_out)
		*vi_scale_out = vi_scale;
	if (vf_total_out)
		*vf_total_out = vf_total;

	return 0;
}
static void efx_sriov_usrev(struct efx_nic *efx, bool enabled)
{
	efx_oword_t reg;

	EFX_POPULATE_OWORD_2(reg,
			     FRF_CZ_USREV_DIS, enabled ? 0 : 1,
			     FRF_CZ_DFLT_EVQ, efx->vfdi_channel->channel);
	efx_writeo(efx, &reg, FR_CZ_USR_EV_CFG);
}
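/* Copy memory between PCI functions by proxying through the MC. Each
 * efx_memcpy_req describes one transfer; a request sourced from
 * from_buf is embedded inline in the MCDI command, otherwise the MC
 * DMAs directly between the (from_rid, from_addr) and (to_rid, to_addr)
 * PCIe address spaces. Memory barriers bracket the call so the source
 * is fully written before, and the destination not read until after,
 * the DMA.
 */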
static int efx_sriov_memcpy(struct efx_nic *efx, struct efx_memcpy_req *req,
			    unsigned int count)
{
	u8 *inbuf, *record;
	unsigned int used;
	u32 from_rid, from_hi, from_lo;
	int rc;

	mb();	/* Finish writing source/reading dest before DMA starts */

	used = MC_CMD_MEMCPY_IN_LEN(count);
	if (WARN_ON(used > MCDI_CTL_SDU_LEN_MAX))
		return -ENOBUFS;

	/* Allocate room for the largest request */
	inbuf = kzalloc(MCDI_CTL_SDU_LEN_MAX, GFP_KERNEL);
	if (inbuf == NULL)
		return -ENOMEM;

	record = inbuf;
	MCDI_SET_DWORD(record, MEMCPY_IN_RECORD, count);
	while (count-- > 0) {
		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_RID,
			       req->to_rid);
		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO,
			       (u32)req->to_addr);
		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI,
			       (u32)(req->to_addr >> 32));
		if (req->from_buf == NULL) {
			from_rid = req->from_rid;
			from_lo = (u32)req->from_addr;
			from_hi = (u32)(req->from_addr >> 32);
		} else {
			if (WARN_ON(used + req->length >
				    MCDI_CTL_SDU_LEN_MAX)) {
				rc = -ENOBUFS;
				goto out;
			}

			from_rid = MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE;
			from_lo = used;
			from_hi = 0;
			memcpy(inbuf + used, req->from_buf, req->length);
			used += req->length;
		}

		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_RID, from_rid);
		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO,
			       from_lo);
		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI,
			       from_hi);
		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_LENGTH,
			       req->length);

		++req;
		record += MC_CMD_MEMCPY_IN_RECORD_LEN;
	}

	rc = efx_mcdi_rpc(efx, MC_CMD_MEMCPY, inbuf, used, NULL, 0, NULL);
out:
	kfree(inbuf);

	mb();	/* Don't write source/read dest before DMA is complete */

	return rc;
}
/* The TX filter is entirely controlled by this driver, and is modified
 * underneath the feet of the VF
 */
static void efx_sriov_reset_tx_filter(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct efx_filter_spec filter;
	u16 vlan;
	int rc;

	if (vf->tx_filter_id != -1) {
		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
					  vf->tx_filter_id);
		netif_dbg(efx, hw, efx->net_dev, "Removed vf %s tx filter %d\n",
			  vf->pci_name, vf->tx_filter_id);
		vf->tx_filter_id = -1;
	}

	if (is_zero_ether_addr(vf->addr.mac_addr))
		return;

	/* Turn on TX filtering automatically if not explicitly
	 * enabled or disabled.
	 */
	if (vf->tx_filter_mode == VF_TX_FILTER_AUTO && vf_max_tx_channels <= 2)
		vf->tx_filter_mode = VF_TX_FILTER_ON;

	vlan = ntohs(vf->addr.tci) & VLAN_VID_MASK;
	efx_filter_init_tx(&filter, abs_index(vf, 0));
	rc = efx_filter_set_eth_local(&filter,
				      vlan ? vlan : EFX_FILTER_VID_UNSPEC,
				      vf->addr.mac_addr);
	BUG_ON(rc);

	rc = efx_filter_insert_filter(efx, &filter, true);
	if (rc < 0) {
		netif_warn(efx, hw, efx->net_dev,
			   "Unable to migrate tx filter for vf %s\n",
			   vf->pci_name);
	} else {
		netif_dbg(efx, hw, efx->net_dev, "Inserted vf %s tx filter %d\n",
			  vf->pci_name, rc);
		vf->tx_filter_id = rc;
	}
}
/* The RX filter is managed here on behalf of the VF driver */
static void efx_sriov_reset_rx_filter(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct efx_filter_spec filter;
	u16 vlan;
	int rc;

	if (vf->rx_filter_id != -1) {
		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
					  vf->rx_filter_id);
		netif_dbg(efx, hw, efx->net_dev, "Removed vf %s rx filter %d\n",
			  vf->pci_name, vf->rx_filter_id);
		vf->rx_filter_id = -1;
	}

	if (!vf->rx_filtering || is_zero_ether_addr(vf->addr.mac_addr))
		return;

	vlan = ntohs(vf->addr.tci) & VLAN_VID_MASK;
	efx_filter_init_rx(&filter, EFX_FILTER_PRI_REQUIRED,
			   vf->rx_filter_flags,
			   abs_index(vf, vf->rx_filter_qid));
	rc = efx_filter_set_eth_local(&filter,
				      vlan ? vlan : EFX_FILTER_VID_UNSPEC,
				      vf->addr.mac_addr);
	BUG_ON(rc);

	rc = efx_filter_insert_filter(efx, &filter, true);
	if (rc < 0) {
		netif_warn(efx, hw, efx->net_dev,
			   "Unable to insert rx filter for vf %s\n",
			   vf->pci_name);
	} else {
		netif_dbg(efx, hw, efx->net_dev, "Inserted vf %s rx filter %d\n",
			  vf->pci_name, rc);
		vf->rx_filter_id = rc;
	}
}
static void __efx_sriov_update_vf_addr(struct efx_vf *vf)
{
	efx_sriov_reset_tx_filter(vf);
	efx_sriov_reset_rx_filter(vf);
	queue_work(vfdi_workqueue, &vf->efx->peer_work);
}
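/* The status page is versioned with a seqlock-style generation_start/
 * generation_end pair: generation_start is bumped and copied first, the
 * body next, and generation_end last. A VF-side reader would be
 * expected to retry until both generations match, along these lines
 * (sketch only; the VF driver is not part of this file):
 *
 *	do {
 *		gen = status->generation_start;
 *		rmb();
 *		memcpy(&snapshot, status, status->length);
 *		rmb();
 *	} while (status->generation_end != gen);
 */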
/* Push the peer list to this VF. The caller must hold status_lock to interlock
 * with VFDI requests, and they must be serialised against manipulation of
 * local_page_list, either by acquiring local_lock or by running from
 * efx_sriov_peer_work()
 */
static void __efx_sriov_push_vf_status(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct vfdi_status *status = efx->vfdi_status.addr;
	struct efx_memcpy_req copy[4];
	struct efx_endpoint_page *epp;
	unsigned int pos, count;
	unsigned data_offset;
	efx_qword_t event;

	WARN_ON(!mutex_is_locked(&vf->status_lock));
	WARN_ON(!vf->status_addr);

	status->local = vf->addr;
	status->generation_end = ++status->generation_start;

	memset(copy, '\0', sizeof(copy));
	/* Write generation_start */
	copy[0].from_buf = &status->generation_start;
	copy[0].to_rid = vf->pci_rid;
	copy[0].to_addr = vf->status_addr + offsetof(struct vfdi_status,
						     generation_start);
	copy[0].length = sizeof(status->generation_start);
	/* DMA the rest of the structure (excluding the generations). This
	 * assumes that the non-generation portion of vfdi_status is in
	 * one chunk starting at the version member.
	 */
	data_offset = offsetof(struct vfdi_status, version);
	copy[1].from_rid = efx->pci_dev->devfn;
	copy[1].from_addr = efx->vfdi_status.dma_addr + data_offset;
	copy[1].to_rid = vf->pci_rid;
	copy[1].to_addr = vf->status_addr + data_offset;
	copy[1].length = status->length - data_offset;

	/* Copy the peer pages */
	pos = 2;
	count = 0;
	list_for_each_entry(epp, &efx->local_page_list, link) {
		if (count == vf->peer_page_count) {
			/* The VF driver will know they need to provide more
			 * pages because peer_addr_count is too large.
			 */
			break;
		}
		copy[pos].from_buf = NULL;
		copy[pos].from_rid = efx->pci_dev->devfn;
		copy[pos].from_addr = epp->addr;
		copy[pos].to_rid = vf->pci_rid;
		copy[pos].to_addr = vf->peer_page_addrs[count];
		copy[pos].length = EFX_PAGE_SIZE;

		if (++pos == ARRAY_SIZE(copy)) {
			efx_sriov_memcpy(efx, copy, ARRAY_SIZE(copy));
			pos = 0;
		}
		++count;
	}

	/* Write generation_end */
	copy[pos].from_buf = &status->generation_end;
	copy[pos].to_rid = vf->pci_rid;
	copy[pos].to_addr = vf->status_addr + offsetof(struct vfdi_status,
						       generation_end);
	copy[pos].length = sizeof(status->generation_end);
	efx_sriov_memcpy(efx, copy, pos + 1);

	/* Notify the guest */
	EFX_POPULATE_QWORD_3(event,
			     FSF_AZ_EV_CODE, FSE_CZ_EV_CODE_USER_EV,
			     VFDI_EV_SEQ, (vf->msg_seqno & 0xff),
			     VFDI_EV_TYPE, VFDI_EV_TYPE_STATUS);
	++vf->msg_seqno;
	efx_generate_event(efx, EFX_VI_BASE + vf->index * efx_vf_size(efx),
			   &event);
}
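/* Program a contiguous range of buffer table entries, pointing each at
 * one 4K guest page (or clearing the entries when addr is NULL).
 */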
static void efx_sriov_bufs(struct efx_nic *efx, unsigned offset,
			   u64 *addr, unsigned count)
{
	efx_qword_t buf;
	unsigned pos;

	for (pos = 0; pos < count; ++pos) {
		EFX_POPULATE_QWORD_3(buf,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF,
				     addr ? addr[pos] >> 12 : 0,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_sram_writeq(efx, efx->membase + FR_BZ_BUF_FULL_TBL,
				&buf, offset + pos);
	}
}
static bool bad_vf_index(struct efx_nic *efx, unsigned index)
{
	return index >= efx_vf_size(efx);
}
static bool bad_buf_count(unsigned buf_count, unsigned max_entry_count)
{
	unsigned max_buf_count = max_entry_count *
		sizeof(efx_qword_t) / EFX_BUF_SIZE;

	return ((buf_count & (buf_count - 1)) || buf_count > max_buf_count);
}
/* Check that VI specified by per-port index belongs to a VF.
 * Optionally set VF index and VI index within the VF.
 */
static bool map_vi_index(struct efx_nic *efx, unsigned abs_index,
			 struct efx_vf **vf_out, unsigned *rel_index_out)
{
	unsigned vf_i;

	if (abs_index < EFX_VI_BASE)
		return true;
	vf_i = (abs_index - EFX_VI_BASE) / efx_vf_size(efx);
	if (vf_i >= efx->vf_init_count)
		return true;

	if (vf_out)
		*vf_out = efx->vf + vf_i;
	if (rel_index_out)
		*rel_index_out = abs_index % efx_vf_size(efx);
	return false;
}
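/* Handle a VFDI INIT_EVQ request: validate the VF-relative EVQ index
 * and buffer count, load the backing pages into the buffer table, and
 * program the timer and event queue pointer tables. The addresses of
 * evq0 are retained so reset notifications can be delivered later.
 */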
static int efx_vfdi_init_evq(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct vfdi_req *req = vf->buf.addr;
	unsigned vf_evq = req->u.init_evq.index;
	unsigned buf_count = req->u.init_evq.buf_count;
	unsigned abs_evq = abs_index(vf, vf_evq);
	unsigned buftbl = EFX_BUFTBL_EVQ_BASE(vf, vf_evq);
	efx_oword_t reg;

	if (bad_vf_index(efx, vf_evq) ||
	    bad_buf_count(buf_count, EFX_MAX_VF_EVQ_SIZE)) {
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Invalid INIT_EVQ from %s: evq %d bufs %d\n",
				  vf->pci_name, vf_evq, buf_count);
		return VFDI_RC_EINVAL;
	}

	efx_sriov_bufs(efx, buftbl, req->u.init_evq.addr, buf_count);

	EFX_POPULATE_OWORD_3(reg,
			     FRF_CZ_TIMER_Q_EN, 1,
			     FRF_CZ_HOST_NOTIFY_MODE, 0,
			     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
	efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, abs_evq);
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(buf_count),
			     FRF_AZ_EVQ_BUF_BASE_ID, buftbl);
	efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL, abs_evq);

	if (vf_evq == 0) {
		memcpy(vf->evq0_addrs, req->u.init_evq.addr,
		       buf_count * sizeof(u64));
		vf->evq0_count = buf_count;
	}

	return VFDI_RC_SUCCESS;
}
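/* Handle a VFDI INIT_RXQ request: validate the indices, load the
 * descriptor ring pages into the buffer table and enable the RX queue,
 * honouring the label and scatter flag supplied by the VF.
 */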
static int efx_vfdi_init_rxq(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct vfdi_req *req = vf->buf.addr;
	unsigned vf_rxq = req->u.init_rxq.index;
	unsigned vf_evq = req->u.init_rxq.evq;
	unsigned buf_count = req->u.init_rxq.buf_count;
	unsigned buftbl = EFX_BUFTBL_RXQ_BASE(vf, vf_rxq);
	unsigned label;
	efx_oword_t reg;

	if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_rxq) ||
	    bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) {
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Invalid INIT_RXQ from %s: rxq %d evq %d "
				  "buf_count %d\n", vf->pci_name, vf_rxq,
				  vf_evq, buf_count);
		return VFDI_RC_EINVAL;
	}
	if (!__test_and_set_bit(req->u.init_rxq.index, vf->rxq_mask))
		++vf->rxq_count;
	efx_sriov_bufs(efx, buftbl, req->u.init_rxq.addr, buf_count);

	label = req->u.init_rxq.label & EFX_FIELD_MASK(FRF_AZ_RX_DESCQ_LABEL);
	EFX_POPULATE_OWORD_6(reg,
			     FRF_AZ_RX_DESCQ_BUF_BASE_ID, buftbl,
			     FRF_AZ_RX_DESCQ_EVQ_ID, abs_index(vf, vf_evq),
			     FRF_AZ_RX_DESCQ_LABEL, label,
			     FRF_AZ_RX_DESCQ_SIZE, __ffs(buf_count),
			     FRF_AZ_RX_DESCQ_JUMBO,
			     !!(req->u.init_rxq.flags &
				VFDI_RXQ_FLAG_SCATTER_EN),
			     FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &reg, FR_BZ_RX_DESC_PTR_TBL,
			 abs_index(vf, vf_rxq));

	return VFDI_RC_SUCCESS;
}
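/* Handle a VFDI INIT_TXQ request. FRF_CZ_TX_DPT_ETH_FILT_EN is set on
 * the queue when tx_filter_mode is VF_TX_FILTER_ON, which makes the
 * hardware enforce the TX MAC filter (spoof-checking) for this VF.
 */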
static int efx_vfdi_init_txq(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct vfdi_req *req = vf->buf.addr;
	unsigned vf_txq = req->u.init_txq.index;
	unsigned vf_evq = req->u.init_txq.evq;
	unsigned buf_count = req->u.init_txq.buf_count;
	unsigned buftbl = EFX_BUFTBL_TXQ_BASE(vf, vf_txq);
	unsigned label, eth_filt_en;
	efx_oword_t reg;

	if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_txq) ||
	    vf_txq >= vf_max_tx_channels ||
	    bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) {
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Invalid INIT_TXQ from %s: txq %d evq %d "
				  "buf_count %d\n", vf->pci_name, vf_txq,
				  vf_evq, buf_count);
		return VFDI_RC_EINVAL;
	}

	mutex_lock(&vf->txq_lock);
	if (!__test_and_set_bit(req->u.init_txq.index, vf->txq_mask))
		++vf->txq_count;
	mutex_unlock(&vf->txq_lock);
	efx_sriov_bufs(efx, buftbl, req->u.init_txq.addr, buf_count);

	eth_filt_en = vf->tx_filter_mode == VF_TX_FILTER_ON;

	label = req->u.init_txq.label & EFX_FIELD_MASK(FRF_AZ_TX_DESCQ_LABEL);
	EFX_POPULATE_OWORD_8(reg,
			     FRF_CZ_TX_DPT_Q_MASK_WIDTH, min(efx->vi_scale, 1U),
			     FRF_CZ_TX_DPT_ETH_FILT_EN, eth_filt_en,
			     FRF_AZ_TX_DESCQ_EN, 1,
			     FRF_AZ_TX_DESCQ_BUF_BASE_ID, buftbl,
			     FRF_AZ_TX_DESCQ_EVQ_ID, abs_index(vf, vf_evq),
			     FRF_AZ_TX_DESCQ_LABEL, label,
			     FRF_AZ_TX_DESCQ_SIZE, __ffs(buf_count),
			     FRF_BZ_TX_NON_IP_DROP_DIS, 1);
	efx_writeo_table(efx, &reg, FR_BZ_TX_DESC_PTR_TBL,
			 abs_index(vf, vf_txq));

	return VFDI_RC_SUCCESS;
}
/* Returns true when efx_vfdi_fini_all_queues should wake */
static bool efx_vfdi_flush_wake(struct efx_vf *vf)
{
	/* Ensure that all updates are visible to efx_vfdi_fini_all_queues() */
	smp_mb();

	return (!vf->txq_count && !vf->rxq_count) ||
	       atomic_read(&vf->rxq_retry_count);
}
static void efx_vfdi_flush_clear(struct efx_vf *vf)
{
	memset(vf->txq_mask, 0, sizeof(vf->txq_mask));
	vf->txq_count = 0;
	memset(vf->rxq_mask, 0, sizeof(vf->rxq_mask));
	vf->rxq_count = 0;
	memset(vf->rxq_retry_mask, 0, sizeof(vf->rxq_retry_mask));
	atomic_set(&vf->rxq_retry_count, 0);
}
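/* Handle a VFDI FINI_ALL_QUEUES request: kick off TX flushes through
 * FR_AZ_TX_FLUSH_DESCQ and RX flushes through MC_CMD_FLUSH_RX_QUEUES,
 * wait for the completion events (retrying any RX queues whose flush
 * failed), then unconditionally zero the descriptor, event queue and
 * timer table entries along with the VF's buffer table range. Flow
 * control is disabled for the duration, presumably so that pause
 * frames cannot stall the RX flushes.
 */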
static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	efx_oword_t reg;
	unsigned count = efx_vf_size(efx);
	unsigned vf_offset = EFX_VI_BASE + vf->index * efx_vf_size(efx);
	unsigned timeout = HZ;
	unsigned index, rxqs_count;
	__le32 *rxqs;
	int rc;

	rxqs = kmalloc(count * sizeof(*rxqs), GFP_KERNEL);
	if (rxqs == NULL)
		return VFDI_RC_ENOMEM;

	rtnl_lock();
	if (efx->fc_disable++ == 0)
		efx_mcdi_set_mac(efx);
	rtnl_unlock();

	/* Flush all the initialized queues */
	rxqs_count = 0;
	for (index = 0; index < count; ++index) {
		if (test_bit(index, vf->txq_mask)) {
			EFX_POPULATE_OWORD_2(reg,
					     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
					     FRF_AZ_TX_FLUSH_DESCQ,
					     vf_offset + index);
			efx_writeo(efx, &reg, FR_AZ_TX_FLUSH_DESCQ);
		}
		if (test_bit(index, vf->rxq_mask))
			rxqs[rxqs_count++] = cpu_to_le32(vf_offset + index);
	}

	atomic_set(&vf->rxq_retry_count, 0);
	while (timeout && (vf->rxq_count || vf->txq_count)) {
		rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)rxqs,
				  rxqs_count * sizeof(*rxqs), NULL, 0, NULL);
		WARN_ON(rc < 0);

		timeout = wait_event_timeout(vf->flush_waitq,
					     efx_vfdi_flush_wake(vf),
					     timeout);
		rxqs_count = 0;
		for (index = 0; index < count; ++index) {
			if (test_and_clear_bit(index, vf->rxq_retry_mask)) {
				atomic_dec(&vf->rxq_retry_count);
				rxqs[rxqs_count++] =
					cpu_to_le32(vf_offset + index);
			}
		}
	}

	rtnl_lock();
	if (--efx->fc_disable == 0)
		efx_mcdi_set_mac(efx);
	rtnl_unlock();

	/* Irrespective of success/failure, fini the queues */
	EFX_ZERO_OWORD(reg);
	for (index = 0; index < count; ++index) {
		efx_writeo_table(efx, &reg, FR_BZ_RX_DESC_PTR_TBL,
				 vf_offset + index);
		efx_writeo_table(efx, &reg, FR_BZ_TX_DESC_PTR_TBL,
				 vf_offset + index);
		efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL,
				 vf_offset + index);
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL,
				 vf_offset + index);
	}
	efx_sriov_bufs(efx, vf->buftbl_base, NULL,
		       EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx));
	kfree(rxqs);
	efx_vfdi_flush_clear(vf);

	vf->evq0_count = 0;

	return timeout ? 0 : VFDI_RC_ETIMEDOUT;
}
static int efx_vfdi_insert_filter(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct vfdi_req *req = vf->buf.addr;
	unsigned vf_rxq = req->u.mac_filter.rxq;
	unsigned flags;

	if (bad_vf_index(efx, vf_rxq) || vf->rx_filtering) {
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Invalid INSERT_FILTER from %s: rxq %d "
				  "flags 0x%x\n", vf->pci_name, vf_rxq,
				  req->u.mac_filter.flags);
		return VFDI_RC_EINVAL;
	}

	flags = 0;
	if (req->u.mac_filter.flags & VFDI_MAC_FILTER_FLAG_RSS)
		flags |= EFX_FILTER_FLAG_RX_RSS;
	if (req->u.mac_filter.flags & VFDI_MAC_FILTER_FLAG_SCATTER)
		flags |= EFX_FILTER_FLAG_RX_SCATTER;
	vf->rx_filter_flags = flags;
	vf->rx_filter_qid = vf_rxq;
	vf->rx_filtering = true;

	efx_sriov_reset_rx_filter(vf);
	queue_work(vfdi_workqueue, &efx->peer_work);

	return VFDI_RC_SUCCESS;
}
static int efx_vfdi_remove_all_filters(struct efx_vf *vf)
{
	vf->rx_filtering = false;
	efx_sriov_reset_rx_filter(vf);
	queue_work(vfdi_workqueue, &vf->efx->peer_work);

	return VFDI_RC_SUCCESS;
}
static int efx_vfdi_set_status_page(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct vfdi_req *req = vf->buf.addr;
	u64 page_count = req->u.set_status_page.peer_page_count;
	u64 max_page_count =
		(EFX_PAGE_SIZE -
		 offsetof(struct vfdi_req, u.set_status_page.peer_page_addr[0]))
		/ sizeof(req->u.set_status_page.peer_page_addr[0]);

	if (!req->u.set_status_page.dma_addr || page_count > max_page_count) {
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Invalid SET_STATUS_PAGE from %s\n",
				  vf->pci_name);
		return VFDI_RC_EINVAL;
	}

	mutex_lock(&efx->local_lock);
	mutex_lock(&vf->status_lock);
	vf->status_addr = req->u.set_status_page.dma_addr;

	kfree(vf->peer_page_addrs);
	vf->peer_page_addrs = NULL;
	vf->peer_page_count = 0;

	if (page_count) {
		vf->peer_page_addrs = kcalloc(page_count, sizeof(u64),
					      GFP_KERNEL);
		if (vf->peer_page_addrs) {
			memcpy(vf->peer_page_addrs,
			       req->u.set_status_page.peer_page_addr,
			       page_count * sizeof(u64));
			vf->peer_page_count = page_count;
		}
	}

	__efx_sriov_push_vf_status(vf);
	mutex_unlock(&vf->status_lock);
	mutex_unlock(&efx->local_lock);

	return VFDI_RC_SUCCESS;
}
static int efx_vfdi_clear_status_page(struct efx_vf *vf)
{
	mutex_lock(&vf->status_lock);
	vf->status_addr = 0;
	mutex_unlock(&vf->status_lock);

	return VFDI_RC_SUCCESS;
}
typedef int (*efx_vfdi_op_t)(struct efx_vf *vf);

static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = {
	[VFDI_OP_INIT_EVQ] = efx_vfdi_init_evq,
	[VFDI_OP_INIT_TXQ] = efx_vfdi_init_txq,
	[VFDI_OP_INIT_RXQ] = efx_vfdi_init_rxq,
	[VFDI_OP_FINI_ALL_QUEUES] = efx_vfdi_fini_all_queues,
	[VFDI_OP_INSERT_FILTER] = efx_vfdi_insert_filter,
	[VFDI_OP_REMOVE_ALL_FILTERS] = efx_vfdi_remove_all_filters,
	[VFDI_OP_SET_STATUS_PAGE] = efx_vfdi_set_status_page,
	[VFDI_OP_CLEAR_STATUS_PAGE] = efx_vfdi_clear_status_page,
};
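/* Process one VFDI request: fetch the request page from the VF with a
 * proxied memcpy, dispatch through vfdi_ops[], clear @busy to accept
 * further requests, and finally write the return code followed by the
 * VFDI_OP_RESPONSE marker back into the VF's request page.
 */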
static void efx_sriov_vfdi(struct work_struct *work)
{
	struct efx_vf *vf = container_of(work, struct efx_vf, req);
	struct efx_nic *efx = vf->efx;
	struct vfdi_req *req = vf->buf.addr;
	struct efx_memcpy_req copy[2];
	int rc;

	/* Copy this page into the local address space */
	memset(copy, '\0', sizeof(copy));
	copy[0].from_rid = vf->pci_rid;
	copy[0].from_addr = vf->req_addr;
	copy[0].to_rid = efx->pci_dev->devfn;
	copy[0].to_addr = vf->buf.dma_addr;
	copy[0].length = EFX_PAGE_SIZE;
	rc = efx_sriov_memcpy(efx, copy, 1);
	if (rc) {
		/* If we can't get the request, we can't reply to the caller */
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Unable to fetch VFDI request from %s rc %d\n",
				  vf->pci_name, -rc);
		vf->busy = false;
		return;
	}

	if (req->op < VFDI_OP_LIMIT && vfdi_ops[req->op] != NULL) {
		rc = vfdi_ops[req->op](vf);
		if (rc == 0) {
			netif_dbg(efx, hw, efx->net_dev,
				  "vfdi request %d from %s ok\n",
				  req->op, vf->pci_name);
		}
	} else {
		netif_dbg(efx, hw, efx->net_dev,
			  "ERROR: Unrecognised request %d from VF %s addr "
			  "%llx\n", req->op, vf->pci_name,
			  (unsigned long long)vf->req_addr);
		rc = VFDI_RC_EOPNOTSUPP;
	}

	/* Allow subsequent VF requests */
	vf->busy = false;
	smp_wmb();

	/* Respond to the request */
	req->rc = rc;
	req->op = VFDI_OP_RESPONSE;

	memset(copy, '\0', sizeof(copy));
	copy[0].from_buf = &req->rc;
	copy[0].to_rid = vf->pci_rid;
	copy[0].to_addr = vf->req_addr + offsetof(struct vfdi_req, rc);
	copy[0].length = sizeof(req->rc);
	copy[1].from_buf = &req->op;
	copy[1].to_rid = vf->pci_rid;
	copy[1].to_addr = vf->req_addr + offsetof(struct vfdi_req, op);
	copy[1].length = sizeof(req->op);

	(void) efx_sriov_memcpy(efx, copy, ARRAY_SIZE(copy));
}
/* After a reset the event queues inside the guests no longer exist. Fill the
 * event ring in guest memory with VFDI reset events, then (re-initialise) the
 * event queue to raise an interrupt. The guest driver will then recover.
 */
static void efx_sriov_reset_vf(struct efx_vf *vf, struct efx_buffer *buffer)
{
	struct efx_nic *efx = vf->efx;
	struct efx_memcpy_req copy_req[4];
	efx_qword_t event;
	unsigned int pos, count, k, buftbl, abs_evq;
	efx_oword_t reg;
	efx_dword_t ptr;
	int rc;

	BUG_ON(buffer->len != EFX_PAGE_SIZE);

	if (!vf->evq0_count)
		return;
	BUG_ON(vf->evq0_count & (vf->evq0_count - 1));

	mutex_lock(&vf->status_lock);
	EFX_POPULATE_QWORD_3(event,
			     FSF_AZ_EV_CODE, FSE_CZ_EV_CODE_USER_EV,
			     VFDI_EV_SEQ, vf->msg_seqno,
			     VFDI_EV_TYPE, VFDI_EV_TYPE_RESET);
	vf->msg_seqno++;
	for (pos = 0; pos < EFX_PAGE_SIZE; pos += sizeof(event))
		memcpy(buffer->addr + pos, &event, sizeof(event));

	for (pos = 0; pos < vf->evq0_count; pos += count) {
		count = min_t(unsigned, vf->evq0_count - pos,
			      ARRAY_SIZE(copy_req));
		for (k = 0; k < count; k++) {
			copy_req[k].from_buf = NULL;
			copy_req[k].from_rid = efx->pci_dev->devfn;
			copy_req[k].from_addr = buffer->dma_addr;
			copy_req[k].to_rid = vf->pci_rid;
			copy_req[k].to_addr = vf->evq0_addrs[pos + k];
			copy_req[k].length = EFX_PAGE_SIZE;
		}
		rc = efx_sriov_memcpy(efx, copy_req, count);
		if (rc) {
			if (net_ratelimit())
				netif_err(efx, hw, efx->net_dev,
					  "ERROR: Unable to notify %s of reset"
					  ": %d\n", vf->pci_name, -rc);
			break;
		}
	}

	/* Reinitialise, arm and trigger evq0 */
	abs_evq = abs_index(vf, 0);
	buftbl = EFX_BUFTBL_EVQ_BASE(vf, 0);
	efx_sriov_bufs(efx, buftbl, vf->evq0_addrs, vf->evq0_count);

	EFX_POPULATE_OWORD_3(reg,
			     FRF_CZ_TIMER_Q_EN, 1,
			     FRF_CZ_HOST_NOTIFY_MODE, 0,
			     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
	efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, abs_evq);
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(vf->evq0_count),
			     FRF_AZ_EVQ_BUF_BASE_ID, buftbl);
	efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL, abs_evq);
	EFX_POPULATE_DWORD_1(ptr, FRF_AZ_EVQ_RPTR, 0);
	efx_writed_table(efx, &ptr, FR_BZ_EVQ_RPTR, abs_evq);

	mutex_unlock(&vf->status_lock);
}
static void efx_sriov_reset_vf_work(struct work_struct *work)
{
	struct efx_vf *vf = container_of(work, struct efx_vf, reset_work);
	struct efx_nic *efx = vf->efx;
	struct efx_buffer buf;

	if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE)) {
		efx_sriov_reset_vf(vf, &buf);
		efx_nic_free_buffer(efx, &buf);
	}
}
static void efx_sriov_handle_no_channel(struct efx_nic *efx)
{
	netif_err(efx, drv, efx->net_dev,
		  "ERROR: IOV requires MSI-X and 1 additional interrupt "
		  "vector. IOV disabled\n");
	efx->vf_count = 0;
}
static int efx_sriov_probe_channel(struct efx_channel *channel)
{
	channel->efx->vfdi_channel = channel;
	return 0;
}
static void
efx_sriov_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
{
	snprintf(buf, len, "%s-iov", channel->efx->name);
}
static const struct efx_channel_type efx_sriov_channel_type = {
	.handle_no_channel	= efx_sriov_handle_no_channel,
	.pre_probe		= efx_sriov_probe_channel,
	.get_name		= efx_sriov_get_channel_name,
	/* no copy operation; channel must not be reallocated */
	.keep_eventq		= true,
};
void efx_sriov_probe(struct efx_nic *efx)
{
	unsigned count;

	if (!max_vfs)
		return;

	if (efx_sriov_cmd(efx, false, &efx->vi_scale, &count))
		return;
	if (count > 0 && count > max_vfs)
		count = max_vfs;

	/* efx_nic_dimension_resources() will reduce vf_count as appropriate */
	efx->vf_count = count;

	efx->extra_channel_type[EFX_EXTRA_CHANNEL_IOV] = &efx_sriov_channel_type;
}
/* Copy the list of individual addresses into the vfdi_status.peers
 * array and auxiliary pages, protected by %local_lock. Drop that lock
 * and then broadcast the address list to every VF.
 */
static void efx_sriov_peer_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, peer_work);
	struct vfdi_status *vfdi_status = efx->vfdi_status.addr;
	struct efx_vf *vf;
	struct efx_local_addr *local_addr;
	struct vfdi_endpoint *peer;
	struct efx_endpoint_page *epp;
	struct list_head pages;
	unsigned int peer_space;
	unsigned int peer_count;
	unsigned int pos;

	mutex_lock(&efx->local_lock);

	/* Move the existing peer pages off %local_page_list */
	INIT_LIST_HEAD(&pages);
	list_splice_tail_init(&efx->local_page_list, &pages);

	/* Populate the VF addresses starting from entry 1 (entry 0 is
	 * the PF address)
	 */
	peer = vfdi_status->peers + 1;
	peer_space = ARRAY_SIZE(vfdi_status->peers) - 1;
	peer_count = 1;
	for (pos = 0; pos < efx->vf_count; ++pos) {
		vf = efx->vf + pos;

		mutex_lock(&vf->status_lock);
		if (vf->rx_filtering && !is_zero_ether_addr(vf->addr.mac_addr)) {
			*peer++ = vf->addr;
			++peer_count;
			--peer_space;
			BUG_ON(peer_space == 0);
		}
		mutex_unlock(&vf->status_lock);
	}

	/* Fill the remaining addresses */
	list_for_each_entry(local_addr, &efx->local_addr_list, link) {
		memcpy(peer->mac_addr, local_addr->addr, ETH_ALEN);
		peer->tci = 0;
		++peer;
		++peer_count;
		if (--peer_space == 0) {
			if (list_empty(&pages)) {
				epp = kmalloc(sizeof(*epp), GFP_KERNEL);
				if (!epp)
					break;
				epp->ptr = dma_alloc_coherent(
					&efx->pci_dev->dev, EFX_PAGE_SIZE,
					&epp->addr, GFP_KERNEL);
				if (!epp->ptr) {
					kfree(epp);
					break;
				}
			} else {
				epp = list_first_entry(
					&pages, struct efx_endpoint_page, link);
				list_del(&epp->link);
			}

			list_add_tail(&epp->link, &efx->local_page_list);
			peer = (struct vfdi_endpoint *)epp->ptr;
			peer_space = EFX_PAGE_SIZE / sizeof(struct vfdi_endpoint);
		}
	}
	vfdi_status->peer_count = peer_count;
	mutex_unlock(&efx->local_lock);

	/* Free any now unused endpoint pages */
	while (!list_empty(&pages)) {
		epp = list_first_entry(
			&pages, struct efx_endpoint_page, link);
		list_del(&epp->link);
		dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE,
				  epp->ptr, epp->addr);
		kfree(epp);
	}

	/* Finally, push the pages */
	for (pos = 0; pos < efx->vf_count; ++pos) {
		vf = efx->vf + pos;

		mutex_lock(&vf->status_lock);
		if (vf->status_addr)
			__efx_sriov_push_vf_status(vf);
		mutex_unlock(&vf->status_lock);
	}
}
static void efx_sriov_free_local(struct efx_nic *efx)
{
	struct efx_local_addr *local_addr;
	struct efx_endpoint_page *epp;

	while (!list_empty(&efx->local_addr_list)) {
		local_addr = list_first_entry(&efx->local_addr_list,
					      struct efx_local_addr, link);
		list_del(&local_addr->link);
		kfree(local_addr);
	}

	while (!list_empty(&efx->local_page_list)) {
		epp = list_first_entry(&efx->local_page_list,
				       struct efx_endpoint_page, link);
		list_del(&epp->link);
		dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE,
				  epp->ptr, epp->addr);
		kfree(epp);
	}
}
*efx
)
1176 efx
->vf
= kzalloc(sizeof(struct efx_vf
) * efx
->vf_count
, GFP_KERNEL
);
1180 for (index
= 0; index
< efx
->vf_count
; ++index
) {
1181 vf
= efx
->vf
+ index
;
1185 vf
->rx_filter_id
= -1;
1186 vf
->tx_filter_mode
= VF_TX_FILTER_AUTO
;
1187 vf
->tx_filter_id
= -1;
1188 INIT_WORK(&vf
->req
, efx_sriov_vfdi
);
1189 INIT_WORK(&vf
->reset_work
, efx_sriov_reset_vf_work
);
1190 init_waitqueue_head(&vf
->flush_waitq
);
1191 mutex_init(&vf
->status_lock
);
1192 mutex_init(&vf
->txq_lock
);
static void efx_sriov_vfs_fini(struct efx_nic *efx)
{
	struct efx_vf *vf;
	unsigned int pos;

	for (pos = 0; pos < efx->vf_count; ++pos) {
		vf = efx->vf + pos;

		efx_nic_free_buffer(efx, &vf->buf);
		kfree(vf->peer_page_addrs);
		vf->peer_page_addrs = NULL;
		vf->peer_page_count = 0;

		vf->evq0_count = 0;
	}
}
static int efx_sriov_vfs_init(struct efx_nic *efx)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	unsigned index, devfn, sriov, buftbl_base;
	u16 offset, stride;
	struct efx_vf *vf;
	int rc;

	sriov = pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV);
	if (!sriov)
		return -ENOENT;

	pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_STRIDE, &stride);

	buftbl_base = efx->vf_buftbl_base;
	devfn = pci_dev->devfn + offset;
	for (index = 0; index < efx->vf_count; ++index) {
		vf = efx->vf + index;

		/* Reserve buffer entries */
		vf->buftbl_base = buftbl_base;
		buftbl_base += EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx);

		vf->pci_rid = devfn;
		snprintf(vf->pci_name, sizeof(vf->pci_name),
			 "%04x:%02x:%02x.%d",
			 pci_domain_nr(pci_dev->bus), pci_dev->bus->number,
			 PCI_SLOT(devfn), PCI_FUNC(devfn));

		rc = efx_nic_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE);
		if (rc)
			goto fail;

		devfn += stride;
	}

	return 0;

fail:
	efx_sriov_vfs_fini(efx);
	return rc;
}
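/* Bring up the SR-IOV back end: enable SR-IOV in the firmware, build
 * the shared vfdi_status page, allocate and initialise per-VF state,
 * and only then enable USR_EV delivery and the PCI VFs themselves, so
 * that the driver is ready for VFDI requests before any VF can probe.
 */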
int efx_sriov_init(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	struct vfdi_status *vfdi_status;
	int rc;

	/* Ensure there's room for vf_channel */
	BUILD_BUG_ON(EFX_MAX_CHANNELS + 1 >= EFX_VI_BASE);
	/* Ensure that VI_BASE is aligned on VI_SCALE */
	BUILD_BUG_ON(EFX_VI_BASE & ((1 << EFX_VI_SCALE_MAX) - 1));

	if (efx->vf_count == 0)
		return 0;

	rc = efx_sriov_cmd(efx, true, NULL, NULL);
	if (rc)
		goto fail_cmd;

	rc = efx_nic_alloc_buffer(efx, &efx->vfdi_status, sizeof(*vfdi_status));
	if (rc)
		goto fail_status;
	vfdi_status = efx->vfdi_status.addr;
	memset(vfdi_status, 0, sizeof(*vfdi_status));
	vfdi_status->version = 1;
	vfdi_status->length = sizeof(*vfdi_status);
	vfdi_status->max_tx_channels = vf_max_tx_channels;
	vfdi_status->vi_scale = efx->vi_scale;
	vfdi_status->rss_rxq_count = efx->rss_spread;
	vfdi_status->peer_count = 1 + efx->vf_count;
	vfdi_status->timer_quantum_ns = efx->timer_quantum_ns;

	rc = efx_sriov_vf_alloc(efx);
	if (rc)
		goto fail_alloc;

	mutex_init(&efx->local_lock);
	INIT_WORK(&efx->peer_work, efx_sriov_peer_work);
	INIT_LIST_HEAD(&efx->local_addr_list);
	INIT_LIST_HEAD(&efx->local_page_list);

	rc = efx_sriov_vfs_init(efx);
	if (rc)
		goto fail_vfs;

	rtnl_lock();
	memcpy(vfdi_status->peers[0].mac_addr,
	       net_dev->dev_addr, ETH_ALEN);
	efx->vf_init_count = efx->vf_count;
	rtnl_unlock();

	efx_sriov_usrev(efx, true);

	/* At this point we must be ready to accept VFDI requests */

	rc = pci_enable_sriov(efx->pci_dev, efx->vf_count);
	if (rc)
		goto fail_pci;

	netif_info(efx, probe, net_dev,
		   "enabled SR-IOV for %d VFs, %d VI per VF\n",
		   efx->vf_count, efx_vf_size(efx));
	return 0;

fail_pci:
	efx_sriov_usrev(efx, false);
	rtnl_lock();
	efx->vf_init_count = 0;
	rtnl_unlock();
	efx_sriov_vfs_fini(efx);
fail_vfs:
	cancel_work_sync(&efx->peer_work);
	efx_sriov_free_local(efx);
	kfree(efx->vf);
fail_alloc:
	efx_nic_free_buffer(efx, &efx->vfdi_status);
fail_status:
	efx_sriov_cmd(efx, false, NULL, NULL);
fail_cmd:
	return rc;
}
void efx_sriov_fini(struct efx_nic *efx)
{
	struct efx_vf *vf;
	unsigned int pos;

	if (efx->vf_init_count == 0)
		return;

	/* Disable all interfaces to reconfiguration */
	BUG_ON(efx->vfdi_channel->enabled);
	efx_sriov_usrev(efx, false);
	rtnl_lock();
	efx->vf_init_count = 0;
	rtnl_unlock();

	/* Flush all reconfiguration work */
	for (pos = 0; pos < efx->vf_count; ++pos) {
		vf = efx->vf + pos;
		cancel_work_sync(&vf->req);
		cancel_work_sync(&vf->reset_work);
	}
	cancel_work_sync(&efx->peer_work);

	pci_disable_sriov(efx->pci_dev);

	/* Tear down back-end state */
	efx_sriov_vfs_fini(efx);
	efx_sriov_free_local(efx);
	kfree(efx->vf);
	efx_nic_free_buffer(efx, &efx->vfdi_status);
	efx_sriov_cmd(efx, false, NULL, NULL);
}
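/* Decode a USR_EV event. The 64-bit address of a VFDI request arrives
 * as a sequence of four 16-bit words (VFDI_EV_TYPE_REQ_WORD0..3) with
 * consecutive sequence numbers; once the final word arrives, the
 * assembled request is handed off to the VFDI work item.
 */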
void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	struct efx_vf *vf;
	unsigned qid, seq, type, data;

	qid = EFX_QWORD_FIELD(*event, FSF_CZ_USER_QID);

	/* USR_EV_REG_VALUE is dword0, so access the VFDI_EV fields directly */
	BUILD_BUG_ON(FSF_CZ_USER_EV_REG_VALUE_LBN != 0);
	seq = EFX_QWORD_FIELD(*event, VFDI_EV_SEQ);
	type = EFX_QWORD_FIELD(*event, VFDI_EV_TYPE);
	data = EFX_QWORD_FIELD(*event, VFDI_EV_DATA);

	netif_vdbg(efx, hw, efx->net_dev,
		   "USR_EV event from qid %d seq 0x%x type %d data 0x%x\n",
		   qid, seq, type, data);

	if (map_vi_index(efx, qid, &vf, NULL))
		return;
	if (vf->busy)
		goto error;

	if (type == VFDI_EV_TYPE_REQ_WORD0) {
		/* Resynchronise */
		vf->req_type = VFDI_EV_TYPE_REQ_WORD0;
		vf->req_seqno = seq + 1;
		vf->req_addr = 0;
	} else if (seq != (vf->req_seqno++ & 0xff) || type != vf->req_type)
		goto error;

	switch (vf->req_type) {
	case VFDI_EV_TYPE_REQ_WORD0:
	case VFDI_EV_TYPE_REQ_WORD1:
	case VFDI_EV_TYPE_REQ_WORD2:
		vf->req_addr |= (u64)data << (vf->req_type << 4);
		++vf->req_type;
		return;

	case VFDI_EV_TYPE_REQ_WORD3:
		vf->req_addr |= (u64)data << 48;
		vf->req_type = VFDI_EV_TYPE_REQ_WORD0;
		vf->busy = true;
		queue_work(vfdi_workqueue, &vf->req);
		return;
	}

error:
	if (net_ratelimit())
		netif_err(efx, hw, efx->net_dev,
			  "ERROR: Screaming VFDI request from %s\n",
			  vf->pci_name);
	/* Reset the request and sequence number */
	vf->req_type = VFDI_EV_TYPE_REQ_WORD0;
	vf->req_seqno = seq + 1;
}
void efx_sriov_flr(struct efx_nic *efx, unsigned vf_i)
{
	struct efx_vf *vf;

	if (vf_i > efx->vf_init_count)
		return;
	vf = efx->vf + vf_i;
	netif_info(efx, hw, efx->net_dev,
		   "FLR on VF %s\n", vf->pci_name);

	vf->status_addr = 0;
	efx_vfdi_remove_all_filters(vf);
	efx_vfdi_flush_clear(vf);

	vf->evq0_count = 0;
}
void efx_sriov_mac_address_changed(struct efx_nic *efx)
{
	struct vfdi_status *vfdi_status = efx->vfdi_status.addr;

	if (!efx->vf_init_count)
		return;
	memcpy(vfdi_status->peers[0].mac_addr,
	       efx->net_dev->dev_addr, ETH_ALEN);
	queue_work(vfdi_workqueue, &efx->peer_work);
}
void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_vf *vf;
	unsigned queue, qid;

	queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
	if (map_vi_index(efx, queue, &vf, &qid))
		return;
	/* Ignore flush completions triggered by an FLR */
	if (!test_bit(qid, vf->txq_mask))
		return;

	__clear_bit(qid, vf->txq_mask);
	--vf->txq_count;

	if (efx_vfdi_flush_wake(vf))
		wake_up(&vf->flush_waitq);
}
void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_vf *vf;
	unsigned ev_failed, queue, qid;

	queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
	ev_failed = EFX_QWORD_FIELD(*event,
				    FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
	if (map_vi_index(efx, queue, &vf, &qid))
		return;
	if (!test_bit(qid, vf->rxq_mask))
		return;

	if (ev_failed) {
		set_bit(qid, vf->rxq_retry_mask);
		atomic_inc(&vf->rxq_retry_count);
	} else {
		__clear_bit(qid, vf->rxq_mask);
		--vf->rxq_count;
	}
	if (efx_vfdi_flush_wake(vf))
		wake_up(&vf->flush_waitq);
}
/* Called from napi. Schedule the reset work item */
void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq)
{
	struct efx_vf *vf;
	unsigned int rel;

	if (map_vi_index(efx, dmaq, &vf, &rel))
		return;

	if (net_ratelimit())
		netif_err(efx, hw, efx->net_dev,
			  "VF %d DMA Q %d reports descriptor fetch error.\n",
			  vf->index, rel);
	queue_work(vfdi_workqueue, &vf->reset_work);
}
void efx_sriov_reset(struct efx_nic *efx)
{
	unsigned int vf_i;
	struct efx_buffer buf;
	struct efx_vf *vf;

	ASSERT_RTNL();

	if (efx->vf_init_count == 0)
		return;

	efx_sriov_usrev(efx, true);
	(void)efx_sriov_cmd(efx, true, NULL, NULL);

	if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE))
		return;

	for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) {
		vf = efx->vf + vf_i;
		efx_sriov_reset_vf(vf, &buf);
	}

	efx_nic_free_buffer(efx, &buf);
}
int efx_init_sriov(void)
{
	/* A single threaded workqueue is sufficient. efx_sriov_vfdi() and
	 * efx_sriov_peer_work() spend almost all their time sleeping for
	 * MCDI to complete anyway
	 */
	vfdi_workqueue = create_singlethread_workqueue("sfc_vfdi");
	if (!vfdi_workqueue)
		return -ENOMEM;

	return 0;
}
void efx_fini_sriov(void)
{
	destroy_workqueue(vfdi_workqueue);
}
int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_vf *vf;

	if (vf_i >= efx->vf_init_count)
		return -EINVAL;
	vf = efx->vf + vf_i;

	mutex_lock(&vf->status_lock);
	memcpy(vf->addr.mac_addr, mac, ETH_ALEN);
	__efx_sriov_update_vf_addr(vf);
	mutex_unlock(&vf->status_lock);

	return 0;
}
int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i,
			  u16 vlan, u8 qos)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_vf *vf;
	u16 tci;

	if (vf_i >= efx->vf_init_count)
		return -EINVAL;
	vf = efx->vf + vf_i;

	mutex_lock(&vf->status_lock);
	tci = (vlan & VLAN_VID_MASK) | ((qos & 0x7) << VLAN_PRIO_SHIFT);
	vf->addr.tci = htons(tci);
	__efx_sriov_update_vf_addr(vf);
	mutex_unlock(&vf->status_lock);

	return 0;
}
int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
			      bool spoofchk)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_vf *vf;
	int rc;

	if (vf_i >= efx->vf_init_count)
		return -EINVAL;
	vf = efx->vf + vf_i;

	mutex_lock(&vf->txq_lock);
	if (vf->txq_count == 0) {
		vf->tx_filter_mode =
			spoofchk ? VF_TX_FILTER_ON : VF_TX_FILTER_OFF;
		rc = 0;
	} else {
		/* This cannot be changed while TX queues are running */
		rc = -EBUSY;
	}
	mutex_unlock(&vf->txq_lock);
	return rc;
}
int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
			    struct ifla_vf_info *ivi)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_vf *vf;
	u16 tci;

	if (vf_i >= efx->vf_init_count)
		return -EINVAL;
	vf = efx->vf + vf_i;

	ivi->vf = vf_i;
	memcpy(ivi->mac, vf->addr.mac_addr, ETH_ALEN);
	ivi->tx_rate = 0;
	tci = ntohs(vf->addr.tci);
	ivi->vlan = tci & VLAN_VID_MASK;
	ivi->qos = (tci >> VLAN_PRIO_SHIFT) & 0x7;
	ivi->spoofchk = vf->tx_filter_mode == VF_TX_FILTER_ON;

	return 0;
}