};
/**
- * struct efx_vf - Back-end resource and protocol state for a PCI VF
+ * struct siena_vf - Back-end resource and protocol state for a PCI VF
* @efx: The Efx NIC owning this VF
* @pci_rid: The PCI requester ID for this VF
* @pci_name: The PCI name (formatted address) of this VF
* @rxq_retry_count: Number of receive queues in @rxq_retry_mask.
* @reset_work: Work item to schedule a VF reset.
*/
-struct efx_vf {
+struct siena_vf {
struct efx_nic *efx;
unsigned int pci_rid;
char pci_name[13]; /* dddd:bb:dd.f */
*/
static struct workqueue_struct *vfdi_workqueue;
-static unsigned abs_index(struct efx_vf *vf, unsigned index)
+static unsigned abs_index(struct siena_vf *vf, unsigned index)
{
return EFX_VI_BASE + vf->index * efx_vf_size(vf->efx) + index;
}
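
For orientation, the VI numbering this implements: each VF owns a contiguous block of efx_vf_size() virtual interfaces starting at EFX_VI_BASE. A minimal sketch with stand-in constants (VI_BASE, VF_SIZE and example_abs_index are illustrative names, not the driver's; upstream derives the values from EFX_VI_BASE and efx_vf_size()):

	#define VI_BASE	128U	/* stand-in for EFX_VI_BASE */
	#define VF_SIZE	4U	/* stand-in for efx_vf_size(efx) */

	/* Absolute VI number for relative VI 'rel' of VF 'vf_index'. */
	static unsigned example_abs_index(unsigned vf_index, unsigned rel)
	{
		return VI_BASE + vf_index * VF_SIZE + rel;
	}
	/* example_abs_index(3, 1) == 128 + 3 * 4 + 1 == 141 */
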
/* The TX filter is entirely controlled by this driver, and is modified
* underneath the feet of the VF
*/
-static void efx_siena_sriov_reset_tx_filter(struct efx_vf *vf)
+static void efx_siena_sriov_reset_tx_filter(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct efx_filter_spec filter;
}
/* The RX filter is managed here on behalf of the VF driver */
-static void efx_siena_sriov_reset_rx_filter(struct efx_vf *vf)
+static void efx_siena_sriov_reset_rx_filter(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct efx_filter_spec filter;
}
}
-static void __efx_siena_sriov_update_vf_addr(struct efx_vf *vf)
+static void __efx_siena_sriov_update_vf_addr(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct siena_nic_data *nic_data = efx->nic_data;
* local_page_list, either by acquiring local_lock or by running from
* efx_siena_sriov_peer_work()
*/
-static void __efx_siena_sriov_push_vf_status(struct efx_vf *vf)
+static void __efx_siena_sriov_push_vf_status(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct siena_nic_data *nic_data = efx->nic_data;
* Optionally set VF index and VI index within the VF.
*/
static bool map_vi_index(struct efx_nic *efx, unsigned abs_index,
- struct efx_vf **vf_out, unsigned *rel_index_out)
+ struct siena_vf **vf_out, unsigned *rel_index_out)
{
+ struct siena_nic_data *nic_data = efx->nic_data;
unsigned vf_i;
if (abs_index < EFX_VI_BASE)
return true;
if (vf_out)
- *vf_out = efx->vf + vf_i;
+ *vf_out = nic_data->vf + vf_i;
if (rel_index_out)
*rel_index_out = abs_index % efx_vf_size(efx);
return false;
}
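
map_vi_index() is the inverse of abs_index() above. Continuing the sketch with the same stand-in constants: the modulo form for the relative index works only because VI_BASE is a multiple of VF_SIZE.

	/* Inverse mapping, mirroring map_vi_index(). */
	static void example_map_vi_index(unsigned abs_index,
					 unsigned *vf_i, unsigned *rel)
	{
		*vf_i = (abs_index - VI_BASE) / VF_SIZE;
		*rel = abs_index % VF_SIZE;	/* VI_BASE % VF_SIZE == 0 */
	}
	/* abs_index == 141 -> *vf_i == 3, *rel == 1 */
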
-static int efx_vfdi_init_evq(struct efx_vf *vf)
+static int efx_vfdi_init_evq(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct vfdi_req *req = vf->buf.addr;
return VFDI_RC_SUCCESS;
}
-static int efx_vfdi_init_rxq(struct efx_vf *vf)
+static int efx_vfdi_init_rxq(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct vfdi_req *req = vf->buf.addr;
return VFDI_RC_SUCCESS;
}
-static int efx_vfdi_init_txq(struct efx_vf *vf)
+static int efx_vfdi_init_txq(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct vfdi_req *req = vf->buf.addr;
}
/* Returns true when efx_vfdi_fini_all_queues should wake */
-static bool efx_vfdi_flush_wake(struct efx_vf *vf)
+static bool efx_vfdi_flush_wake(struct siena_vf *vf)
{
/* Ensure that all updates are visible to efx_vfdi_fini_all_queues() */
smp_mb();
atomic_read(&vf->rxq_retry_count);
}
-static void efx_vfdi_flush_clear(struct efx_vf *vf)
+static void efx_vfdi_flush_clear(struct siena_vf *vf)
{
memset(vf->txq_mask, 0, sizeof(vf->txq_mask));
vf->txq_count = 0;
atomic_set(&vf->rxq_retry_count, 0);
}
-static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
+static int efx_vfdi_fini_all_queues(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
efx_oword_t reg;
return timeout ? 0 : VFDI_RC_ETIMEDOUT;
}
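
efx_vfdi_flush_wake() is the wake condition for a waiter sleeping inside efx_vfdi_fini_all_queues(); the smp_mb() above makes the flush bookkeeping visible to that waiter before it re-evaluates the condition. The waiter side (elided in this hunk) follows the usual pattern, roughly:

	/* Sketch of the waiter: sleep until every queue flush has
	 * completed or a retry is pending, bounded by a timeout. */
	timeout = wait_event_timeout(vf->flush_waitq,
				     efx_vfdi_flush_wake(vf),
				     timeout);
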
-static int efx_vfdi_insert_filter(struct efx_vf *vf)
+static int efx_vfdi_insert_filter(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct siena_nic_data *nic_data = efx->nic_data;
return VFDI_RC_SUCCESS;
}
-static int efx_vfdi_remove_all_filters(struct efx_vf *vf)
+static int efx_vfdi_remove_all_filters(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct siena_nic_data *nic_data = efx->nic_data;
return VFDI_RC_SUCCESS;
}
-static int efx_vfdi_set_status_page(struct efx_vf *vf)
+static int efx_vfdi_set_status_page(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct siena_nic_data *nic_data = efx->nic_data;
return VFDI_RC_SUCCESS;
}
-static int efx_vfdi_clear_status_page(struct efx_vf *vf)
+static int efx_vfdi_clear_status_page(struct siena_vf *vf)
{
mutex_lock(&vf->status_lock);
vf->status_addr = 0;
return VFDI_RC_SUCCESS;
}
-typedef int (*efx_vfdi_op_t)(struct efx_vf *vf);
+typedef int (*efx_vfdi_op_t)(struct siena_vf *vf);
static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = {
[VFDI_OP_INIT_EVQ] = efx_vfdi_init_evq,
static void efx_siena_sriov_vfdi(struct work_struct *work)
{
- struct efx_vf *vf = container_of(work, struct efx_vf, req);
+ struct siena_vf *vf = container_of(work, struct siena_vf, req);
struct efx_nic *efx = vf->efx;
struct vfdi_req *req = vf->buf.addr;
struct efx_memcpy_req copy[2];
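
The dispatch itself (elided in this hunk) is a bounds-checked lookup in the vfdi_ops[] table; in sketch form:

	/* Sketch of the dispatch step: reject unknown or unimplemented
	 * opcodes, otherwise call the table entry for the request. */
	if (req->op < VFDI_OP_LIMIT && vfdi_ops[req->op] != NULL)
		rc = vfdi_ops[req->op](vf);
	else
		rc = VFDI_RC_EOPNOTSUPP;
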
* event ring in guest memory with VFDI reset events, then (re-initialise) the
* event queue to raise an interrupt. The guest driver will then recover.
*/
-static void efx_siena_sriov_reset_vf(struct efx_vf *vf,
+
+static void efx_siena_sriov_reset_vf(struct siena_vf *vf,
struct efx_buffer *buffer)
{
struct efx_nic *efx = vf->efx;
static void efx_siena_sriov_reset_vf_work(struct work_struct *work)
{
- struct efx_vf *vf = container_of(work, struct efx_vf, reset_work);
+ struct siena_vf *vf = container_of(work, struct siena_vf, reset_work);
struct efx_nic *efx = vf->efx;
struct efx_buffer buf;
peer_work);
struct efx_nic *efx = nic_data->efx;
struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr;
- struct efx_vf *vf;
+ struct siena_vf *vf;
struct efx_local_addr *local_addr;
struct vfdi_endpoint *peer;
struct efx_endpoint_page *epp;
peer_space = ARRAY_SIZE(vfdi_status->peers) - 1;
peer_count = 1;
for (pos = 0; pos < efx->vf_count; ++pos) {
- vf = efx->vf + pos;
+ vf = nic_data->vf + pos;
mutex_lock(&vf->status_lock);
if (vf->rx_filtering && !is_zero_ether_addr(vf->addr.mac_addr)) {
/* Finally, push the pages */
for (pos = 0; pos < efx->vf_count; ++pos) {
- vf = efx->vf + pos;
+ vf = nic_data->vf + pos;
mutex_lock(&vf->status_lock);
if (vf->status_addr)
static int efx_siena_sriov_vf_alloc(struct efx_nic *efx)
{
unsigned index;
- struct efx_vf *vf;
+ struct siena_vf *vf;
+ struct siena_nic_data *nic_data = efx->nic_data;
- efx->vf = kzalloc(sizeof(struct efx_vf) * efx->vf_count, GFP_KERNEL);
- if (!efx->vf)
+ nic_data->vf = kcalloc(efx->vf_count, sizeof(*nic_data->vf),
+ GFP_KERNEL);
+ if (!nic_data->vf)
return -ENOMEM;
for (index = 0; index < efx->vf_count; ++index) {
- vf = efx->vf + index;
+ vf = nic_data->vf + index;
vf->efx = efx;
vf->index = index;
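
The allocation change above is worth noting: kzalloc() with an open-coded multiply and kcalloc() both return zeroed memory, but kcalloc() additionally checks the count * size product for overflow. In sketch form:

	/* Both return zeroed storage for n elements; kcalloc() returns
	 * NULL instead of a short buffer if n * sizeof(*p) overflows. */
	p = kzalloc(n * sizeof(*p), GFP_KERNEL);	/* unchecked multiply */
	p = kcalloc(n, sizeof(*p), GFP_KERNEL);		/* checked multiply */
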
static void efx_siena_sriov_vfs_fini(struct efx_nic *efx)
{
- struct efx_vf *vf;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct siena_vf *vf;
unsigned int pos;
for (pos = 0; pos < efx->vf_count; ++pos) {
- vf = efx->vf + pos;
+ vf = nic_data->vf + pos;
efx_nic_free_buffer(efx, &vf->buf);
kfree(vf->peer_page_addrs);
struct siena_nic_data *nic_data = efx->nic_data;
unsigned index, devfn, sriov, buftbl_base;
u16 offset, stride;
- struct efx_vf *vf;
+ struct siena_vf *vf;
int rc;
sriov = pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV);
buftbl_base = nic_data->vf_buftbl_base;
devfn = pci_dev->devfn + offset;
for (index = 0; index < efx->vf_count; ++index) {
- vf = efx->vf + index;
+ vf = nic_data->vf + index;
/* Reserve buffer entries */
vf->buftbl_base = buftbl_base;
fail_vfs:
cancel_work_sync(&nic_data->peer_work);
efx_siena_sriov_free_local(efx);
- kfree(efx->vf);
+ kfree(nic_data->vf);
fail_alloc:
efx_nic_free_buffer(efx, &nic_data->vfdi_status);
fail_status:
void efx_siena_sriov_fini(struct efx_nic *efx)
{
- struct efx_vf *vf;
+ struct siena_vf *vf;
unsigned int pos;
struct siena_nic_data *nic_data = efx->nic_data;
/* Flush all reconfiguration work */
for (pos = 0; pos < efx->vf_count; ++pos) {
- vf = efx->vf + pos;
+ vf = nic_data->vf + pos;
cancel_work_sync(&vf->req);
cancel_work_sync(&vf->reset_work);
}
/* Tear down back-end state */
efx_siena_sriov_vfs_fini(efx);
efx_siena_sriov_free_local(efx);
- kfree(efx->vf);
+ kfree(nic_data->vf);
efx_nic_free_buffer(efx, &nic_data->vfdi_status);
efx_siena_sriov_cmd(efx, false, NULL, NULL);
}
void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event)
{
struct efx_nic *efx = channel->efx;
- struct efx_vf *vf;
+ struct siena_vf *vf;
unsigned qid, seq, type, data;
qid = EFX_QWORD_FIELD(*event, FSF_CZ_USER_QID);
void efx_siena_sriov_flr(struct efx_nic *efx, unsigned vf_i)
{
- struct efx_vf *vf;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct siena_vf *vf;
if (vf_i >= efx->vf_init_count)
return;
- vf = efx->vf + vf_i;
+ vf = nic_data->vf + vf_i;
netif_info(efx, hw, efx->net_dev,
"FLR on VF %s\n", vf->pci_name);
void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
- struct efx_vf *vf;
+ struct siena_vf *vf;
unsigned queue, qid;
queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
- struct efx_vf *vf;
+ struct siena_vf *vf;
unsigned ev_failed, queue, qid;
queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
/* Called from napi. Schedule the reset work item */
void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq)
{
- struct efx_vf *vf;
+ struct siena_vf *vf;
unsigned int rel;
if (map_vi_index(efx, dmaq, &vf, &rel))
/* Reset all VFs */
void efx_siena_sriov_reset(struct efx_nic *efx)
{
+ struct siena_nic_data *nic_data = efx->nic_data;
unsigned int vf_i;
struct efx_buffer buf;
- struct efx_vf *vf;
+ struct siena_vf *vf;
ASSERT_RTNL();
return;
for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) {
- vf = efx->vf + vf_i;
+ vf = nic_data->vf + vf_i;
efx_siena_sriov_reset_vf(vf, &buf);
}
int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac)
{
- struct efx_vf *vf;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct siena_vf *vf;
if (vf_i >= efx->vf_init_count)
return -EINVAL;
- vf = efx->vf + vf_i;
+ vf = nic_data->vf + vf_i;
mutex_lock(&vf->status_lock);
ether_addr_copy(vf->addr.mac_addr, mac);
int efx_siena_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i,
u16 vlan, u8 qos)
{
- struct efx_vf *vf;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct siena_vf *vf;
u16 tci;
if (vf_i >= efx->vf_init_count)
return -EINVAL;
- vf = efx->vf + vf_i;
+ vf = nic_data->vf + vf_i;
mutex_lock(&vf->status_lock);
tci = (vlan & VLAN_VID_MASK) | ((qos & 0x7) << VLAN_PRIO_SHIFT);
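
The tci value packs the 802.1Q tag control information: bits 0-11 carry the VLAN ID (VLAN_VID_MASK is 0x0fff) and bits 13-15 the priority (VLAN_PRIO_SHIFT is 13, both from <linux/if_vlan.h>). A worked example:

	/* vlan = 100, qos = 5:
	 * tci = (100 & 0x0fff) | ((5 & 0x7) << 13)
	 *     = 0x0064 | 0xa000
	 *     = 0xa064
	 */
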
int efx_siena_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf_i,
bool spoofchk)
{
- struct efx_vf *vf;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct siena_vf *vf;
int rc;
if (vf_i >= efx->vf_init_count)
return -EINVAL;
- vf = efx->vf + vf_i;
+ vf = nic_data->vf + vf_i;
mutex_lock(&vf->txq_lock);
if (vf->txq_count == 0) {
int efx_siena_sriov_get_vf_config(struct efx_nic *efx, int vf_i,
struct ifla_vf_info *ivi)
{
- struct efx_vf *vf;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct siena_vf *vf;
u16 tci;
if (vf_i >= efx->vf_init_count)
return -EINVAL;
- vf = efx->vf + vf_i;
+ vf = nic_data->vf + vf_i;
ivi->vf = vf_i;
ether_addr_copy(ivi->mac, vf->addr.mac_addr);