struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
{
struct vport *vport;
- struct hlist_node *n;
struct hlist_head *head;
head = vport_hash_bucket(dp, port_no);
- hlist_for_each_entry_rcu(vport, n, head, dp_hash_node) {
+ hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
if (vport->port_no == port_no)
return vport;
}
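
For context: the 3.9-era hlist rework made the iterator macros compute the containing entry themselves (via hlist_entry_safe()), so the separate struct hlist_node cursor disappears from every caller, which is why the local n is dropped above. A minimal sketch of the new calling convention, using hypothetical my_item/my_lookup names:

#include <linux/rculist.h>

struct my_item {
	u16 key;
	struct hlist_node node;
};

/* RCU read-side lookup with the three-argument iterator; the entry
 * pointer itself doubles as the cursor. */
static struct my_item *my_lookup(struct hlist_head *head, u16 key)
{
	struct my_item *item;

	hlist_for_each_entry_rcu(item, head, node) {
		if (item->key == key)
			return item;
	}
	return NULL;
}
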
struct sk_buff *segs, *nskb;
int err;
- segs = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
+ segs = __skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM, false);
if (IS_ERR(segs))
return PTR_ERR(segs);
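
The three-argument __skb_gso_segment() is the primitive here; the old skb_gso_segment() survives as its tx_path == true case. Its wrapper in this era's include/linux/netdevice.h is essentially:

static inline struct sk_buff *skb_gso_segment(struct sk_buff *skb,
					      netdev_features_t features)
{
	/* tx_path == true keeps the transmit-side offload sanity
	 * warnings; the upcall path above passes false because it is
	 * segmenting packets it received, not ones it is sending. */
	return __skb_gso_segment(skb, features, true);
}
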
len = sizeof(struct ovs_header);
len += nla_total_size(skb->len);
len += nla_total_size(FLOW_BUFSIZE);
- if (upcall_info->cmd == OVS_PACKET_CMD_ACTION)
- len += nla_total_size(8);
+ if (upcall_info->userdata)
+ len += NLA_ALIGN(upcall_info->userdata->nla_len);
user_skb = genlmsg_new(len, GFP_ATOMIC);
if (!user_skb) {
nla_nest_end(user_skb, nla);
if (upcall_info->userdata)
- nla_put_u64(user_skb, OVS_PACKET_ATTR_USERDATA,
- nla_get_u64(upcall_info->userdata));
+ __nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
+ nla_len(upcall_info->userdata),
+ nla_data(upcall_info->userdata));
nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);
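
Two properties make the new sizing and the copy consistent, assuming upcall_info->userdata is a well-formed attribute: nla_len already counts the attribute header, so NLA_ALIGN(nla_len) is exactly the room the forwarded attribute occupies, and __nla_put() emits header plus payload verbatim. A hypothetical helper showing the paired pattern:

#include <net/netlink.h>

/* Hypothetical helper: size check by alignment, then verbatim re-emit
 * of a caller-supplied attribute under a new attribute type. */
static int copy_attr(struct sk_buff *user_skb, int attrtype,
		     const struct nlattr *src)
{
	/* src->nla_len == NLA_HDRLEN + payload, so aligning it gives
	 * the same footprint as nla_total_size(nla_len(src)) */
	if (unlikely(skb_tailroom(user_skb) < NLA_ALIGN(src->nla_len)))
		return -EMSGSIZE;

	__nla_put(user_skb, attrtype, nla_len(src), nla_data(src));
	return 0;
}
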
static int validate_userspace(const struct nlattr *attr)
{
static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = {
[OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
- [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_U64 },
+ [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_UNSPEC },
};
struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
int error;
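
The policy relaxation is what permits the variable length: NLA_U64 made the parser enforce an exact 8-byte payload, whereas NLA_UNSPEC with no .len set applies no length check at all. On the sending side, an OVS_ACTION_ATTR_USERSPACE action can then carry a cookie of any size, sketched hypothetically as:

/* Hypothetical sender-side sketch: attach a userdata cookie of
 * arbitrary length to an action being composed in msg. */
static int put_userdata(struct sk_buff *msg, const void *cookie, int len)
{
	return nla_put(msg, OVS_USERSPACE_ATTR_USERDATA, len, cookie);
}
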
for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
struct vport *vport;
- struct hlist_node *node, *n;
+ struct hlist_node *n;
- hlist_for_each_entry_safe(vport, node, n, &dp->ports[i], dp_hash_node)
+ hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
if (vport->port_no != OVSP_LOCAL)
ovs_dp_detach_port(vport);
}
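
The safe variant changes the same way: only the spare cursor n needed for delete-during-walk survives, and the leading entry cursor is gone. A teardown sketch reusing the hypothetical my_item type from above:

#include <linux/slab.h>

/* Unlink and free every entry; "n" holds the next node so the current
 * entry may be removed mid-walk. */
static void my_flush(struct hlist_head *head)
{
	struct my_item *item;
	struct hlist_node *n;

	hlist_for_each_entry_safe(item, n, head, node) {
		hlist_del(&item->node);
		kfree(item);
	}
}
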
vport = ovs_vport_rtnl_rcu(dp, port_no);
if (!vport)
- return ERR_PTR(-ENOENT);
+ return ERR_PTR(-ENODEV);
return vport;
} else
return ERR_PTR(-EINVAL);
rcu_read_lock();
for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
struct vport *vport;
- struct hlist_node *n;
j = 0;
- hlist_for_each_entry_rcu(vport, n, &dp->ports[i], dp_hash_node) {
+ hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
if (j >= skip &&
ovs_vport_cmd_fill_info(vport, skb,
NETLINK_CB(cb->skb).portid,
static int __init dp_init(void)
{
- struct sk_buff *dummy_skb;
int err;
- BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));
+ BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
pr_info("Open vSwitch switching datapath\n");
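
FIELD_SIZEOF() is what lets the dummy_skb local go: it yields the size of a struct member without needing an instance. In this era's include/linux/kernel.h it is simply:

/* sizeof applied to a member through a null pointer: the expression is
 * never evaluated at run time, only its type is */
#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))
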
return stats;
}
- static int internal_dev_mac_addr(struct net_device *dev, void *p)
- {
- struct sockaddr *addr = p;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
- return 0;
- }
-
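
The removed handler duplicated what the networking core already provides. eth_mac_addr() in net/ethernet/eth.c performs, approximately (modulo the prepare/commit split it gained around the same time):

int eth_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	/* live address changes are refused unless the device opts in */
	if (netif_running(dev) && !(dev->priv_flags & IFF_LIVE_ADDR_CHANGE))
		return -EBUSY;
	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	return 0;
}

The extra netif_running() check is why the patch also sets IFF_LIVE_ADDR_CHANGE in the setup code below: internal ports could previously change their MAC while up, and the flag preserves that behavior under eth_mac_addr().
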
/* Called with rcu_read_lock_bh. */
static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
{
static void internal_dev_getinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, "openvswitch");
+ strlcpy(info->driver, "openvswitch", sizeof(info->driver));
}
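
The strcpy() → strlcpy() swap matters because the ethtool_drvinfo name fields are fixed char[32] arrays: strlcpy() bounds the copy to the destination and always NUL-terminates. A minimal sketch of the pattern, with a hypothetical fill_drvinfo() helper:

#include <linux/string.h>
#include <linux/ethtool.h>

static void fill_drvinfo(struct ethtool_drvinfo *info, const char *name)
{
	/* writes at most sizeof(info->driver) - 1 characters plus a NUL,
	 * truncating instead of overflowing on an over-long name */
	strlcpy(info->driver, name, sizeof(info->driver));
}
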
static const struct ethtool_ops internal_dev_ethtool_ops = {
.get_drvinfo = internal_dev_getinfo,
.get_link = ethtool_op_get_link,
};
static const struct net_device_ops internal_dev_netdev_ops = {
.ndo_open = internal_dev_open,
.ndo_stop = internal_dev_stop,
.ndo_start_xmit = internal_dev_xmit,
- .ndo_set_mac_address = internal_dev_mac_addr,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = internal_dev_change_mtu,
.ndo_get_stats64 = internal_dev_get_stats,
};
netdev->netdev_ops = &internal_dev_netdev_ops;
netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
netdev->destructor = internal_dev_destructor;
SET_ETHTOOL_OPS(netdev, &internal_dev_ethtool_ops);
netdev->tx_queue_len = 0;