#endif /* End CONFIG_NET_POLL_CONTROLLER */
static int xgbe_setup_tc(struct net_device *netdev, enum tc_setup_type type,
- struct tc_to_netdev *tc_to_netdev)
+ void *type_data)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct tc_mqprio_qopt *mqprio = type_data;
u8 tc;
if (type != TC_SETUP_MQPRIO)
return -EOPNOTSUPP;
- tc_to_netdev->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
- tc = tc_to_netdev->mqprio->num_tc;
+ mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ tc = mqprio->num_tc;
if (tc > pdata->hw_feat.tc_cnt)
return -EINVAL;
}
int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type,
- struct tc_to_netdev *tc)
+ void *type_data)
{
+ struct tc_mqprio_qopt *mqprio = type_data;
+
if (type != TC_SETUP_MQPRIO)
return -EOPNOTSUPP;
- tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
- return bnx2x_setup_tc(dev, tc->mqprio->num_tc);
+ return bnx2x_setup_tc(dev, mqprio->num_tc);
}
/* called with rtnl_lock */
/* setup_tc callback */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);
int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type,
- struct tc_to_netdev *tc);
+ void *type_data);
int bnx2x_get_vf_config(struct net_device *dev, int vf,
struct ifla_vf_info *ivi);
}
static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
- struct tc_to_netdev *ntc)
+ void *type_data)
{
+ struct tc_mqprio_qopt *mqprio = type_data;
+
if (type != TC_SETUP_MQPRIO)
return -EOPNOTSUPP;
- ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
- return bnxt_setup_mq_tc(dev, ntc->mqprio->num_tc);
+ return bnxt_setup_mq_tc(dev, mqprio->num_tc);
}
#ifdef CONFIG_RFS_ACCEL
}
static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
- struct tc_to_netdev *tc)
+ void *type_data)
{
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
switch (type) {
case TC_SETUP_CLSU32:
- return cxgb_setup_tc_cls_u32(dev, tc->cls_u32);
+ return cxgb_setup_tc_cls_u32(dev, type_data);
default:
return -EOPNOTSUPP;
}
}
static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
- struct tc_to_netdev *tc)
+ void *type_data)
{
struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct tc_mqprio_qopt *mqprio = type_data;
u8 num_tc;
int i;
if (type != TC_SETUP_MQPRIO)
return -EOPNOTSUPP;
- tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
- num_tc = tc->mqprio->num_tc;
+ mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ num_tc = mqprio->num_tc;
if (num_tc == priv->num_tc)
return 0;
}
static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
- struct tc_to_netdev *tc)
+ void *type_data)
{
+ struct tc_mqprio_qopt *mqprio = type_data;
+
if (type != TC_SETUP_MQPRIO)
return -EOPNOTSUPP;
- return hns3_setup_tc(dev, tc->mqprio->num_tc);
+ return hns3_setup_tc(dev, mqprio->num_tc);
}
static int hns3_vlan_rx_add_vid(struct net_device *netdev,
}
static int __fm10k_setup_tc(struct net_device *dev, enum tc_setup_type type,
- struct tc_to_netdev *tc)
+ void *type_data)
{
+ struct tc_mqprio_qopt *mqprio = type_data;
+
if (type != TC_SETUP_MQPRIO)
return -EOPNOTSUPP;
- tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
- return fm10k_setup_tc(dev, tc->mqprio->num_tc);
+ return fm10k_setup_tc(dev, mqprio->num_tc);
}
static void fm10k_assign_l2_accel(struct fm10k_intfc *interface,
}
static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
- struct tc_to_netdev *tc)
+ void *type_data)
{
+ struct tc_mqprio_qopt *mqprio = type_data;
+
if (type != TC_SETUP_MQPRIO)
return -EOPNOTSUPP;
- tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
- return i40e_setup_tc(netdev, tc->mqprio->num_tc);
+ return i40e_setup_tc(netdev, mqprio->num_tc);
}
/**
}
static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type,
- struct tc_to_netdev *tc)
+ void *type_data)
{
switch (type) {
case TC_SETUP_CLSU32:
- return ixgbe_setup_tc_cls_u32(dev, tc->cls_u32);
+ return ixgbe_setup_tc_cls_u32(dev, type_data);
case TC_SETUP_MQPRIO:
- return ixgbe_setup_tc_mqprio(dev, tc->mqprio);
+ return ixgbe_setup_tc_mqprio(dev, type_data);
default:
return -EOPNOTSUPP;
}
}
static int __mlx4_en_setup_tc(struct net_device *dev, enum tc_setup_type type,
- struct tc_to_netdev *tc)
+ void *type_data)
{
+ struct tc_mqprio_qopt *mqprio = type_data;
+
if (type != TC_SETUP_MQPRIO)
return -EOPNOTSUPP;
- if (tc->mqprio->num_tc && tc->mqprio->num_tc != MLX4_EN_NUM_UP_HIGH)
+ if (mqprio->num_tc && mqprio->num_tc != MLX4_EN_NUM_UP_HIGH)
return -EINVAL;
- tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
- return mlx4_en_alloc_tx_queue_per_tc(dev, tc->mqprio->num_tc);
+ return mlx4_en_alloc_tx_queue_per_tc(dev, mqprio->num_tc);
}
#ifdef CONFIG_RFS_ACCEL
}
static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
- struct tc_to_netdev *tc)
+ void *type_data)
{
switch (type) {
case TC_SETUP_CLSFLOWER:
- return mlx5e_setup_tc_cls_flower(dev, tc->cls_flower);
+ return mlx5e_setup_tc_cls_flower(dev, type_data);
case TC_SETUP_MQPRIO:
- return mlx5e_setup_tc_mqprio(dev, tc->mqprio);
+ return mlx5e_setup_tc_mqprio(dev, type_data);
default:
return -EOPNOTSUPP;
}
}
-static int mlx5e_rep_setup_tc_cls_flower(struct net_device *dev,
- struct tc_to_netdev *tc)
+static int
+mlx5e_rep_setup_tc_cls_flower(struct net_device *dev,
+ struct tc_cls_flower_offload *cls_flower)
{
- struct tc_cls_flower_offload *cls_flower = tc->cls_flower;
struct mlx5e_priv *priv = netdev_priv(dev);
if (TC_H_MAJ(cls_flower->common.handle) != TC_H_MAJ(TC_H_INGRESS) ||
dev = mlx5_eswitch_get_uplink_netdev(esw);
return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER,
- tc);
+ cls_flower);
}
switch (cls_flower->command) {
}
static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
- struct tc_to_netdev *tc)
+ void *type_data)
{
switch (type) {
case TC_SETUP_CLSFLOWER:
- return mlx5e_rep_setup_tc_cls_flower(dev, tc);
+ return mlx5e_rep_setup_tc_cls_flower(dev, type_data);
default:
return -EOPNOTSUPP;
}
#ifndef __MLX5_EN_TC_H__
#define __MLX5_EN_TC_H__
+#include <net/pkt_cls.h>
+
#define MLX5E_TC_FLOW_ID_MASK 0x0000ffff
int mlx5e_tc_init(struct mlx5e_priv *priv);
}
static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
- struct tc_to_netdev *tc)
+ void *type_data)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
switch (type) {
case TC_SETUP_CLSMATCHALL:
- return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port,
- tc->cls_mall);
+ return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data);
case TC_SETUP_CLSFLOWER:
- return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port,
- tc->cls_flower);
+ return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port, type_data);
default:
return -EOPNOTSUPP;
}
}
static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev,
- enum tc_setup_type type,
- struct tc_to_netdev *tc)
+ enum tc_setup_type type, void *type_data)
{
- struct tc_cls_bpf_offload *cls_bpf = tc->cls_bpf;
+ struct tc_cls_bpf_offload *cls_bpf = type_data;
struct nfp_net *nn = netdev_priv(netdev);
if (type != TC_SETUP_CLSBPF || !nfp_net_ebpf_capable(nn) ||
#include <linux/hashtable.h>
#include <linux/time64.h>
#include <linux/types.h>
+#include <net/pkt_cls.h>
-struct tc_to_netdev;
struct net_device;
struct nfp_app;
void nfp_flower_metadata_cleanup(struct nfp_app *app);
int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
- enum tc_setup_type type, struct tc_to_netdev *tc);
+ enum tc_setup_type type, void *type_data);
int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
struct nfp_fl_key_ls *key_ls,
struct net_device *netdev,
}
int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
- enum tc_setup_type type, struct tc_to_netdev *tc)
+ enum tc_setup_type type, void *type_data)
{
- struct tc_cls_flower_offload *cls_flower = tc->cls_flower;
+ struct tc_cls_flower_offload *cls_flower = type_data;
if (type != TC_SETUP_CLSFLOWER ||
TC_H_MAJ(cls_flower->common.handle) != TC_H_MAJ(TC_H_INGRESS) ||
struct net_device;
struct pci_dev;
struct sk_buff;
-struct tc_to_netdev;
struct sk_buff;
struct nfp_app;
struct nfp_cpp;
void (*ctrl_msg_rx)(struct nfp_app *app, struct sk_buff *skb);
int (*setup_tc)(struct nfp_app *app, struct net_device *netdev,
- enum tc_setup_type type, struct tc_to_netdev *tc);
+ enum tc_setup_type type, void *type_data);
bool (*tc_busy)(struct nfp_app *app, struct nfp_net *nn);
int (*xdp_offload)(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog);
static inline int nfp_app_setup_tc(struct nfp_app *app,
struct net_device *netdev,
- enum tc_setup_type type,
- struct tc_to_netdev *tc)
+ enum tc_setup_type type, void *type_data)
{
if (!app || !app->type->setup_tc)
return -EOPNOTSUPP;
- return app->type->setup_tc(app, netdev, type, tc);
+ return app->type->setup_tc(app, netdev, type, type_data);
}
static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
};
int nfp_port_setup_tc(struct net_device *netdev, enum tc_setup_type type,
- struct tc_to_netdev *tc)
+ void *type_data)
{
struct nfp_port *port;
if (!port)
return -EOPNOTSUPP;
- return nfp_app_setup_tc(port->app, netdev, type, tc);
+ return nfp_app_setup_tc(port->app, netdev, type, type_data);
}
struct nfp_port *
#include <net/devlink.h>
-struct tc_to_netdev;
struct net_device;
struct nfp_app;
struct nfp_pf;
extern const struct switchdev_ops nfp_port_switchdev_ops;
int nfp_port_setup_tc(struct net_device *netdev, enum tc_setup_type type,
- struct tc_to_netdev *tc);
+ void *type_data);
struct nfp_port *nfp_port_from_netdev(struct net_device *netdev);
struct nfp_port *
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
- struct tc_to_netdev *tc);
+ void *type_data);
unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
extern unsigned int efx_piobuf_size;
extern bool efx_separate_tx_channels;
netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb);
void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index);
int ef4_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
- struct tc_to_netdev *tc);
+ void *type_data);
unsigned int ef4_tx_max_skb_descs(struct ef4_nic *efx);
extern bool ef4_separate_tx_channels;
}
int ef4_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
- struct tc_to_netdev *ntc)
+ void *type_data)
{
struct ef4_nic *efx = netdev_priv(net_dev);
+ struct tc_mqprio_qopt *mqprio = type_data;
struct ef4_channel *channel;
struct ef4_tx_queue *tx_queue;
unsigned tc, num_tc;
if (type != TC_SETUP_MQPRIO)
return -EOPNOTSUPP;
- num_tc = ntc->mqprio->num_tc;
+ num_tc = mqprio->num_tc;
if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0 || num_tc > EF4_MAX_TX_TC)
return -EINVAL;
- ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
if (num_tc == net_dev->num_tc)
return 0;
}
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
- struct tc_to_netdev *ntc)
+ void *type_data)
{
struct efx_nic *efx = netdev_priv(net_dev);
+ struct tc_mqprio_qopt *mqprio = type_data;
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
unsigned tc, num_tc;
if (type != TC_SETUP_MQPRIO)
return -EOPNOTSUPP;
- num_tc = ntc->mqprio->num_tc;
+ num_tc = mqprio->num_tc;
if (num_tc > EFX_MAX_TX_TC)
return -EINVAL;
- ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
if (num_tc == net_dev->num_tc)
return 0;
}
static int netcp_setup_tc(struct net_device *dev, enum tc_setup_type type,
- struct tc_to_netdev *tc)
+ void *type_data)
{
+ struct tc_mqprio_qopt *mqprio = type_data;
u8 num_tc;
int i;
if (type != TC_SETUP_MQPRIO)
return -EOPNOTSUPP;
- tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
- num_tc = tc->mqprio->num_tc;
+ mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ num_tc = mqprio->num_tc;
/* Sanity-check the number of traffic classes requested */
if ((dev->real_num_tx_queues <= 1) ||
typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
struct sk_buff *skb);
-/* These structures hold the attributes of qdisc and classifiers
- * that are being passed to the netdevice through the setup_tc op.
- */
enum tc_setup_type {
	TC_SETUP_MQPRIO,
	TC_SETUP_CLSU32,
	TC_SETUP_CLSFLOWER,
	TC_SETUP_CLSMATCHALL,
	TC_SETUP_CLSBPF,
};
-struct tc_cls_u32_offload;
-
-struct tc_to_netdev {
- union {
- struct tc_cls_u32_offload *cls_u32;
- struct tc_cls_flower_offload *cls_flower;
- struct tc_cls_matchall_offload *cls_mall;
- struct tc_cls_bpf_offload *cls_bpf;
- struct tc_mqprio_qopt *mqprio;
- };
-};
-
/* These structures hold the attributes of xdp state that are being passed
* to the netdevice through the xdp op.
*/
* int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
* int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
* int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type,
- * struct tc_to_netdev *tc);
+ * void *type_data);
* Called to setup any 'tc' scheduler, classifier or action on @dev.
* This is always called from the stack with the rtnl lock held and netif
* tx queues stopped. This allows the netdevice to perform queue
int vf, bool setting);
int (*ndo_setup_tc)(struct net_device *dev,
enum tc_setup_type type,
- struct tc_to_netdev *tc);
+ void *type_data);
#if IS_ENABLED(CONFIG_FCOE)
int (*ndo_fcoe_enable)(struct net_device *dev);
int (*ndo_fcoe_disable)(struct net_device *dev);
}
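The netdevice.h hunks above define the new contract: the value of @type alone determines what type_data points to, so each driver callback switches on the type and casts the opaque pointer to the matching offload struct, as every conversion in this patch does. A minimal sketch of a callback under the new signature; the foo_* driver and its two helpers are hypothetical, shown only to illustrate the cast-by-type pattern:

static int foo_setup_tc(struct net_device *dev, enum tc_setup_type type,
			void *type_data)
{
	switch (type) {
	case TC_SETUP_MQPRIO: {
		/* TC_SETUP_MQPRIO implies a struct tc_mqprio_qopt */
		struct tc_mqprio_qopt *mqprio = type_data;

		mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
		return foo_setup_mqprio(dev, mqprio->num_tc); /* hypothetical */
	}
	case TC_SETUP_CLSU32:
		/* TC_SETUP_CLSU32 implies a struct tc_cls_u32_offload */
		return foo_setup_cls_u32(dev, type_data); /* hypothetical */
	default:
		return -EOPNOTSUPP;
	}
}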
static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
- struct tc_to_netdev *tc)
+ void *type_data)
{
switch (type) {
case TC_SETUP_CLSMATCHALL:
- return dsa_slave_setup_tc_cls_matchall(dev, tc->cls_mall);
+ return dsa_slave_setup_tc_cls_matchall(dev, type_data);
default:
return -EOPNOTSUPP;
}
enum tc_clsbpf_command cmd)
{
struct net_device *dev = tp->q->dev_queue->dev;
- struct tc_cls_bpf_offload bpf_offload = {};
- struct tc_to_netdev offload;
+ struct tc_cls_bpf_offload cls_bpf = {};
int err;
- offload.cls_bpf = &bpf_offload;
+ tc_cls_common_offload_init(&cls_bpf.common, tp);
+ cls_bpf.command = cmd;
+ cls_bpf.exts = &prog->exts;
+ cls_bpf.prog = prog->filter;
+ cls_bpf.name = prog->bpf_name;
+ cls_bpf.exts_integrated = prog->exts_integrated;
+ cls_bpf.gen_flags = prog->gen_flags;
- tc_cls_common_offload_init(&bpf_offload.common, tp);
- bpf_offload.command = cmd;
- bpf_offload.exts = &prog->exts;
- bpf_offload.prog = prog->filter;
- bpf_offload.name = prog->bpf_name;
- bpf_offload.exts_integrated = prog->exts_integrated;
- bpf_offload.gen_flags = prog->gen_flags;
-
- err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSBPF, &offload);
+ err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSBPF, &cls_bpf);
if (!err && (cmd == TC_CLSBPF_ADD || cmd == TC_CLSBPF_REPLACE))
prog->gen_flags |= TCA_CLS_FLAGS_IN_HW;
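The classifier now builds the type-specific offload struct on the stack and hands its address straight to ndo_setup_tc, with no tc_to_netdev wrapper in between. On the driver side the receiver simply casts type_data back, then dispatches on the command set above. A sketch of such a consumer for the cls_bpf case, under the same assumptions (the foo_* names are hypothetical):

static int foo_setup_tc_cls_bpf(struct net_device *dev,
				struct tc_cls_bpf_offload *cls_bpf)
{
	switch (cls_bpf->command) {
	case TC_CLSBPF_ADD:
	case TC_CLSBPF_REPLACE:
		/* offload cls_bpf->prog, honouring cls_bpf->gen_flags */
		return foo_bpf_offload(dev, cls_bpf->prog); /* hypothetical */
	case TC_CLSBPF_DESTROY:
		foo_bpf_remove(dev); /* hypothetical */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}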
u32 handle;
u32 flags;
struct rcu_head rcu;
- struct tc_to_netdev tc;
struct net_device *hw_dev;
};
static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
{
- struct tc_cls_flower_offload offload = {0};
+ struct tc_cls_flower_offload cls_flower = {};
struct net_device *dev = f->hw_dev;
- struct tc_to_netdev *tc = &f->tc;
if (!tc_can_offload(dev, tp))
return;
- tc_cls_common_offload_init(&offload.common, tp);
- offload.command = TC_CLSFLOWER_DESTROY;
- offload.cookie = (unsigned long)f;
+ tc_cls_common_offload_init(&cls_flower.common, tp);
+ cls_flower.command = TC_CLSFLOWER_DESTROY;
+ cls_flower.cookie = (unsigned long) f;
- tc->cls_flower = &offload;
-
- dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, tc);
+ dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, &cls_flower);
}
static int fl_hw_replace_filter(struct tcf_proto *tp,
struct cls_fl_filter *f)
{
struct net_device *dev = tp->q->dev_queue->dev;
- struct tc_cls_flower_offload offload = {0};
- struct tc_to_netdev *tc = &f->tc;
+ struct tc_cls_flower_offload cls_flower = {};
int err;
if (!tc_can_offload(dev, tp)) {
return tc_skip_sw(f->flags) ? -EINVAL : 0;
}
dev = f->hw_dev;
- offload.egress_dev = true;
+ cls_flower.egress_dev = true;
} else {
f->hw_dev = dev;
}
- tc_cls_common_offload_init(&offload.common, tp);
- offload.command = TC_CLSFLOWER_REPLACE;
- offload.cookie = (unsigned long)f;
- offload.dissector = dissector;
- offload.mask = mask;
- offload.key = &f->mkey;
- offload.exts = &f->exts;
-
- tc->cls_flower = &offload;
+ tc_cls_common_offload_init(&cls_flower.common, tp);
+ cls_flower.command = TC_CLSFLOWER_REPLACE;
+ cls_flower.cookie = (unsigned long) f;
+ cls_flower.dissector = dissector;
+ cls_flower.mask = mask;
+ cls_flower.key = &f->mkey;
+ cls_flower.exts = &f->exts;
- err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, tc);
+ err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER,
+ &cls_flower);
if (!err)
f->flags |= TCA_CLS_FLAGS_IN_HW;
static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
{
- struct tc_cls_flower_offload offload = {0};
+ struct tc_cls_flower_offload cls_flower = {};
struct net_device *dev = f->hw_dev;
- struct tc_to_netdev *tc = &f->tc;
if (!tc_can_offload(dev, tp))
return;
- tc_cls_common_offload_init(&offload.common, tp);
- offload.command = TC_CLSFLOWER_STATS;
- offload.cookie = (unsigned long)f;
- offload.exts = &f->exts;
-
- tc->cls_flower = &offload;
+ tc_cls_common_offload_init(&cls_flower.common, tp);
+ cls_flower.command = TC_CLSFLOWER_STATS;
+ cls_flower.cookie = (unsigned long) f;
+ cls_flower.exts = &f->exts;
- dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, tc);
+ dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, &cls_flower);
}
static void __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f)
unsigned long cookie)
{
struct net_device *dev = tp->q->dev_queue->dev;
- struct tc_to_netdev offload;
- struct tc_cls_matchall_offload mall_offload = {0};
+ struct tc_cls_matchall_offload cls_mall = {};
int err;
- tc_cls_common_offload_init(&mall_offload.common, tp);
- offload.cls_mall = &mall_offload;
- offload.cls_mall->command = TC_CLSMATCHALL_REPLACE;
- offload.cls_mall->exts = &head->exts;
- offload.cls_mall->cookie = cookie;
+ tc_cls_common_offload_init(&cls_mall.common, tp);
+ cls_mall.command = TC_CLSMATCHALL_REPLACE;
+ cls_mall.exts = &head->exts;
+ cls_mall.cookie = cookie;
err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSMATCHALL,
- &offload);
+ &cls_mall);
if (!err)
head->flags |= TCA_CLS_FLAGS_IN_HW;
unsigned long cookie)
{
struct net_device *dev = tp->q->dev_queue->dev;
- struct tc_to_netdev offload;
- struct tc_cls_matchall_offload mall_offload = {0};
+ struct tc_cls_matchall_offload cls_mall = {};
- tc_cls_common_offload_init(&mall_offload.common, tp);
- offload.cls_mall = &mall_offload;
- offload.cls_mall->command = TC_CLSMATCHALL_DESTROY;
- offload.cls_mall->exts = NULL;
- offload.cls_mall->cookie = cookie;
+ tc_cls_common_offload_init(&cls_mall.common, tp);
+ cls_mall.command = TC_CLSMATCHALL_DESTROY;
+ cls_mall.cookie = cookie;
- dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSMATCHALL, &offload);
+ dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSMATCHALL, &cls_mall);
}
static void mall_destroy(struct tcf_proto *tp)
static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
{
struct net_device *dev = tp->q->dev_queue->dev;
- struct tc_cls_u32_offload u32_offload = {0};
- struct tc_to_netdev offload;
+ struct tc_cls_u32_offload cls_u32 = {};
- offload.cls_u32 = &u32_offload;
+ if (!tc_should_offload(dev, tp, 0))
+ return;
- if (tc_should_offload(dev, tp, 0)) {
- tc_cls_common_offload_init(&u32_offload.common, tp);
- offload.cls_u32->command = TC_CLSU32_DELETE_KNODE;
- offload.cls_u32->knode.handle = handle;
- dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &offload);
- }
+ tc_cls_common_offload_init(&cls_u32.common, tp);
+ cls_u32.command = TC_CLSU32_DELETE_KNODE;
+ cls_u32.knode.handle = handle;
+
+ dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &cls_u32);
}
static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
u32 flags)
{
struct net_device *dev = tp->q->dev_queue->dev;
- struct tc_cls_u32_offload u32_offload = {0};
- struct tc_to_netdev offload;
+ struct tc_cls_u32_offload cls_u32 = {};
int err;
if (!tc_should_offload(dev, tp, flags))
return tc_skip_sw(flags) ? -EINVAL : 0;
- offload.cls_u32 = &u32_offload;
-
- tc_cls_common_offload_init(&u32_offload.common, tp);
- offload.cls_u32->command = TC_CLSU32_NEW_HNODE;
- offload.cls_u32->hnode.divisor = h->divisor;
- offload.cls_u32->hnode.handle = h->handle;
- offload.cls_u32->hnode.prio = h->prio;
+ tc_cls_common_offload_init(&cls_u32.common, tp);
+ cls_u32.command = TC_CLSU32_NEW_HNODE;
+ cls_u32.hnode.divisor = h->divisor;
+ cls_u32.hnode.handle = h->handle;
+ cls_u32.hnode.prio = h->prio;
- err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &offload);
+ err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &cls_u32);
if (tc_skip_sw(flags))
return err;
static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
{
struct net_device *dev = tp->q->dev_queue->dev;
- struct tc_cls_u32_offload u32_offload = {0};
- struct tc_to_netdev offload;
+ struct tc_cls_u32_offload cls_u32 = {};
- offload.cls_u32 = &u32_offload;
+ if (!tc_should_offload(dev, tp, 0))
+ return;
- if (tc_should_offload(dev, tp, 0)) {
- tc_cls_common_offload_init(&u32_offload.common, tp);
- offload.cls_u32->command = TC_CLSU32_DELETE_HNODE;
- offload.cls_u32->hnode.divisor = h->divisor;
- offload.cls_u32->hnode.handle = h->handle;
- offload.cls_u32->hnode.prio = h->prio;
+ tc_cls_common_offload_init(&cls_u32.common, tp);
+ cls_u32.command = TC_CLSU32_DELETE_HNODE;
+ cls_u32.hnode.divisor = h->divisor;
+ cls_u32.hnode.handle = h->handle;
+ cls_u32.hnode.prio = h->prio;
- dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &offload);
- }
+ dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &cls_u32);
}
static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
u32 flags)
{
struct net_device *dev = tp->q->dev_queue->dev;
- struct tc_cls_u32_offload u32_offload = {0};
- struct tc_to_netdev offload;
+ struct tc_cls_u32_offload cls_u32 = {};
int err;
- offload.cls_u32 = &u32_offload;
-
if (!tc_should_offload(dev, tp, flags))
return tc_skip_sw(flags) ? -EINVAL : 0;
- tc_cls_common_offload_init(&u32_offload.common, tp);
- offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE;
- offload.cls_u32->knode.handle = n->handle;
- offload.cls_u32->knode.fshift = n->fshift;
+ tc_cls_common_offload_init(&cls_u32.common, tp);
+ cls_u32.command = TC_CLSU32_REPLACE_KNODE;
+ cls_u32.knode.handle = n->handle;
+ cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
- offload.cls_u32->knode.val = n->val;
- offload.cls_u32->knode.mask = n->mask;
+ cls_u32.knode.val = n->val;
+ cls_u32.knode.mask = n->mask;
#else
- offload.cls_u32->knode.val = 0;
- offload.cls_u32->knode.mask = 0;
+ cls_u32.knode.val = 0;
+ cls_u32.knode.mask = 0;
#endif
- offload.cls_u32->knode.sel = &n->sel;
- offload.cls_u32->knode.exts = &n->exts;
+ cls_u32.knode.sel = &n->sel;
+ cls_u32.knode.exts = &n->exts;
if (n->ht_down)
- offload.cls_u32->knode.link_handle = n->ht_down->handle;
+ cls_u32.knode.link_handle = n->ht_down->handle;
- err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &offload);
+ err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &cls_u32);
if (!err)
n->flags |= TCA_CLS_FLAGS_IN_HW;
}
if (priv->hw_offload && dev->netdev_ops->ndo_setup_tc) {
- struct tc_mqprio_qopt offload = { 0 };
- struct tc_to_netdev tc = { { .mqprio = &offload } };
+ struct tc_mqprio_qopt mqprio = {};
- dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_MQPRIO, &tc);
+ dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_MQPRIO, &mqprio);
} else {
netdev_set_num_tc(dev, 0);
}
* supplied and verified mapping
*/
if (qopt->hw) {
- struct tc_mqprio_qopt offload = *qopt;
- struct tc_to_netdev tc = { { .mqprio = &offload } };
+ struct tc_mqprio_qopt mqprio = *qopt;
- err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_MQPRIO, &tc);
+ err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_MQPRIO,
+ &mqprio);
if (err)
return err;
- priv->hw_offload = offload.hw;
+ priv->hw_offload = mqprio.hw;
} else {
netdev_set_num_tc(dev, qopt->num_tc);
for (i = 0; i < qopt->num_tc; i++)