/* sunvnet.c: Sun LDOM Virtual Network Driver.
*
* Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
+ * Copyright (C) 2016 Oracle. All rights reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
static LIST_HEAD(vnet_list);
static DEFINE_MUTEX(vnet_list_mutex);
+static struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
+{
+ unsigned int hash = vnet_hashfn(skb->data);
+ struct hlist_head *hp = &vp->port_hash[hash];
+ struct vnet_port *port;
+
+ hlist_for_each_entry_rcu(port, hp, hash) {
+ if (!sunvnet_port_is_up_common(port))
+ continue;
+ if (ether_addr_equal(port->raddr, skb->data))
+ return port;
+ }
+ list_for_each_entry_rcu(port, &vp->port_list, list) {
+ if (!port->switch_port)
+ continue;
+ if (!sunvnet_port_is_up_common(port))
+ continue;
+ return port;
+ }
+ return NULL;
+}
+
+/* func arg to sunvnet_start_xmit_common() to get the proper tx port */
+static struct vnet_port *vnet_tx_port_find(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct vnet *vp = netdev_priv(dev);
+
+ return __tx_port_find(vp, skb);
+}
+
+static u16 vnet_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ struct vnet *vp = netdev_priv(dev);
+ struct vnet_port *port = __tx_port_find(vp, skb);
+
+ if (!port)
+ return 0;
+
+ return port->q_index;
+}
+
+/* Wrappers to common functions */
+static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ return sunvnet_start_xmit_common(skb, dev, vnet_tx_port_find);
+}
+
+static void vnet_set_rx_mode(struct net_device *dev)
+{
+ struct vnet *vp = netdev_priv(dev);
+
+	sunvnet_set_rx_mode_common(dev, vp);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void vnet_poll_controller(struct net_device *dev)
+{
+ struct vnet *vp = netdev_priv(dev);
+
+	sunvnet_poll_controller_common(dev, vp);
+}
+#endif
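
The wrappers above exist because the common transmit path no longer looks up the port itself: sunvnet_start_xmit_common() now takes a lookup callback with the signature of vnet_tx_port_find(). As an illustration only (the names below are hypothetical, not part of this patch), another front end built on the common code would supply its own lookup the same way:

	/* Illustration only -- hypothetical consumer of the common code.
	 * A driver with a single vnet_port per net_device could supply a
	 * trivial lookup and reuse sunvnet_start_xmit_common() unchanged.
	 */
	static struct vnet_port *example_tx_port_find(struct sk_buff *skb,
						      struct net_device *dev)
	{
		return netdev_priv(dev);	/* one port per netdev */
	}

	static int example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
	{
		return sunvnet_start_xmit_common(skb, dev,
						 example_tx_port_find);
	}

sunvnet.c itself does exactly this through vnet_tx_port_find() and vnet_start_xmit() above.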
+
static const struct net_device_ops vnet_ops = {
.ndo_open = sunvnet_open_common,
.ndo_stop = sunvnet_close_common,
- .ndo_set_rx_mode = sunvnet_set_rx_mode_common,
+ .ndo_set_rx_mode = vnet_set_rx_mode,
.ndo_set_mac_address = sunvnet_set_mac_addr_common,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = sunvnet_tx_timeout_common,
.ndo_change_mtu = sunvnet_change_mtu_common,
- .ndo_start_xmit = sunvnet_start_xmit_common,
- .ndo_select_queue = sunvnet_select_queue_common,
+ .ndo_start_xmit = vnet_start_xmit,
+ .ndo_select_queue = vnet_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = sunvnet_poll_controller_common,
+ .ndo_poll_controller = vnet_poll_controller,
#endif
};
/* sunvnet.c: Sun LDOM Virtual Network Driver.
*
* Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
+ * Copyright (C) 2016 Oracle. All rights reserved.
*/
#include <linux/module.h>
int sunvnet_send_attr_common(struct vio_driver_state *vio)
{
struct vnet_port *port = to_vnet_port(vio);
- struct net_device *dev = port->vp->dev;
+ struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
struct vio_net_attr_info pkt;
int framelen = ETH_FRAME_LEN;
int i, err;
static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc)
{
- struct net_device *dev = port->vp->dev;
+ struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
unsigned int len = desc->size;
unsigned int copy_len;
struct sk_buff *skb;
struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
struct vio_dring_data *pkt = msgbuf;
struct net_device *dev;
- struct vnet *vp;
u32 end;
struct vio_net_desc *desc;
struct netdev_queue *txq;
return 0;
end = pkt->end_idx;
- vp = port->vp;
- dev = vp->dev;
+ dev = VNET_PORT_TO_NET_DEVICE(port);
netif_tx_lock(dev);
if (unlikely(!idx_is_pending(dr, end))) {
netif_tx_unlock(dev);
static int handle_mcast(struct vnet_port *port, void *msgbuf)
{
struct vio_net_mcast_info *pkt = msgbuf;
+ struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
if (pkt->tag.stype != VIO_SUBTYPE_ACK)
pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n",
- port->vp->dev->name,
+ dev->name,
pkt->tag.type,
pkt->tag.stype,
pkt->tag.stype_env,
{
struct netdev_queue *txq;
- txq = netdev_get_tx_queue(port->vp->dev, port->q_index);
+ txq = netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port),
+ port->q_index);
__netif_tx_lock(txq, smp_processor_id());
if (likely(netif_tx_queue_stopped(txq))) {
struct vio_dring_state *dr;
__netif_tx_unlock(txq);
}
-static inline bool port_is_up(struct vnet_port *vnet)
+bool sunvnet_port_is_up_common(struct vnet_port *vnet)
{
struct vio_driver_state *vio = &vnet->vio;
return !!(vio->hs_state & VIO_HS_COMPLETE);
}
+EXPORT_SYMBOL_GPL(sunvnet_port_is_up_common);
static int vnet_event_napi(struct vnet_port *port, int budget)
{
napi_resume:
if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) {
- if (!port_is_up(port)) {
+ if (!sunvnet_port_is_up_common(port)) {
/* failures like handshake_failure()
* may have cleaned up dring, but
* NAPI polling may bring us here.
return err;
}
-static struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
-{
- unsigned int hash = vnet_hashfn(skb->data);
- struct hlist_head *hp = &vp->port_hash[hash];
- struct vnet_port *port;
-
- hlist_for_each_entry_rcu(port, hp, hash) {
- if (!port_is_up(port))
- continue;
- if (ether_addr_equal(port->raddr, skb->data))
- return port;
- }
- list_for_each_entry_rcu(port, &vp->port_list, list) {
- if (!port->switch_port)
- continue;
- if (!port_is_up(port))
- continue;
- return port;
- }
- return NULL;
-}
-
static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port,
unsigned *pending)
{
struct sk_buff *freeskbs;
unsigned pending;
- netif_tx_lock(port->vp->dev);
+ netif_tx_lock(VNET_PORT_TO_NET_DEVICE(port));
freeskbs = vnet_clean_tx_ring(port, &pending);
- netif_tx_unlock(port->vp->dev);
+ netif_tx_unlock(VNET_PORT_TO_NET_DEVICE(port));
vnet_free_skbs(freeskbs);
return skb;
}
-u16 sunvnet_select_queue_common(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb,
+ struct vnet_port *(*vnet_tx_port)
+ (struct sk_buff *, struct net_device *))
{
- struct vnet *vp = netdev_priv(dev);
- struct vnet_port *port = __tx_port_find(vp, skb);
-
- if (port == NULL)
- return 0;
- return port->q_index;
-}
-EXPORT_SYMBOL_GPL(sunvnet_select_queue_common);
-
-static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb)
-{
- struct net_device *dev = port->vp->dev;
+ struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
struct sk_buff *segs;
int maclen, datalen;
curr->csum_offset = offsetof(struct udphdr, check);
if (!(status & NETDEV_TX_MASK))
- status = sunvnet_start_xmit_common(curr, dev);
+ status = sunvnet_start_xmit_common(curr, dev,
+ vnet_tx_port);
if (status & NETDEV_TX_MASK)
dev_kfree_skb_any(curr);
}
return NETDEV_TX_OK;
}
-int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev)
+int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
+ struct vnet_port *(*vnet_tx_port)
+ (struct sk_buff *, struct net_device *))
{
- struct vnet *vp = netdev_priv(dev);
struct vnet_port *port = NULL;
struct vio_dring_state *dr;
struct vio_net_desc *d;
struct netdev_queue *txq;
rcu_read_lock();
- port = __tx_port_find(vp, skb);
+ port = vnet_tx_port(skb, dev);
if (unlikely(!port)) {
rcu_read_unlock();
goto out_dropped;
}
if (skb_is_gso(skb) && skb->len > port->tsolen) {
- err = vnet_handle_offloads(port, skb);
+ err = vnet_handle_offloads(port, skb, vnet_tx_port);
rcu_read_unlock();
return err;
}
}
}
-void sunvnet_set_rx_mode_common(struct net_device *dev)
+void sunvnet_set_rx_mode_common(struct net_device *dev, struct vnet *vp)
{
- struct vnet *vp = netdev_priv(dev);
struct vnet_port *port;
rcu_read_lock();
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-void sunvnet_poll_controller_common(struct net_device *dev)
+void sunvnet_poll_controller_common(struct net_device *dev, struct vnet *vp)
{
- struct vnet *vp = netdev_priv(dev);
struct vnet_port *port;
unsigned long flags;
n = vp->nports++;
n = n & (VNET_MAX_TXQS - 1);
port->q_index = n;
- netif_tx_wake_queue(netdev_get_tx_queue(vp->dev, port->q_index));
+ netif_tx_wake_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port),
+ port->q_index));
}
EXPORT_SYMBOL_GPL(sunvnet_port_add_txq_common);
void sunvnet_port_rm_txq_common(struct vnet_port *port)
{
port->vp->nports--;
- netif_tx_stop_queue(netdev_get_tx_queue(port->vp->dev, port->q_index));
+ netif_tx_stop_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port),
+ port->q_index));
}
EXPORT_SYMBOL_GPL(sunvnet_port_rm_txq_common);
};
struct vnet;
+
+/* Structure to describe a vnet-port or vsw-port in the MD (machine
+ * description). If the vsw bit is set, this structure represents a
+ * vswitch port, and the net_device can be found from ->dev. If the
+ * vsw bit is not set, the net_device is available from ->vp->dev.
+ * See the VNET_PORT_TO_NET_DEVICE macro below.
+ */
struct vnet_port {
struct vio_driver_state vio;
u8 raddr[ETH_ALEN];
unsigned switch_port:1;
unsigned tso:1;
- unsigned __pad:14;
+ unsigned vsw:1;
+ unsigned __pad:13;
struct vnet *vp;
+ struct net_device *dev;
struct vnet_tx_entry tx_bufs[VNET_TX_RING_SIZE];
int nports;
};
+/* Macro used by common code to get the net_device from the proper location */
+#define VNET_PORT_TO_NET_DEVICE(__port) \
+ ((__port)->vsw ? (__port)->dev : (__port)->vp->dev)
+
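
For illustration (a hedged sketch, not part of this patch; the helper name is made up), common code is expected to go through the macro above instead of dereferencing port->vp->dev directly, so one code path serves both port flavors:

	/* Illustrative sketch only: resolve the net_device for either flavor
	 * of port -- ->dev when ->vsw is set, ->vp->dev otherwise.
	 */
	static inline void example_wake_tx_queue(struct vnet_port *port)
	{
		struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);

		netif_tx_wake_queue(netdev_get_tx_queue(dev, port->q_index));
	}

This is the same pattern the patch applies in sunvnet_port_add_txq_common() and the other converted call sites.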
/* Common funcs */
void sunvnet_clean_timer_expire_common(unsigned long port0);
int sunvnet_open_common(struct net_device *dev);
int sunvnet_close_common(struct net_device *dev);
-void sunvnet_set_rx_mode_common(struct net_device *dev);
+void sunvnet_set_rx_mode_common(struct net_device *dev, struct vnet *vp);
int sunvnet_set_mac_addr_common(struct net_device *dev, void *p);
void sunvnet_tx_timeout_common(struct net_device *dev);
int sunvnet_change_mtu_common(struct net_device *dev, int new_mtu);
-int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev);
-u16 sunvnet_select_queue_common(struct net_device *dev,
- struct sk_buff *skb,
- void *accel_priv,
- select_queue_fallback_t fallback);
+int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
+ struct vnet_port *(*vnet_tx_port)
+ (struct sk_buff *, struct net_device *));
#ifdef CONFIG_NET_POLL_CONTROLLER
-void sunvnet_poll_controller_common(struct net_device *dev);
+void sunvnet_poll_controller_common(struct net_device *dev, struct vnet *vp);
#endif
void sunvnet_event_common(void *arg, int event);
int sunvnet_send_attr_common(struct vio_driver_state *vio);
void sunvnet_handshake_complete_common(struct vio_driver_state *vio);
int sunvnet_poll_common(struct napi_struct *napi, int budget);
void sunvnet_port_free_tx_bufs_common(struct vnet_port *port);
+bool sunvnet_port_is_up_common(struct vnet_port *vnet);
void sunvnet_port_add_txq_common(struct vnet_port *port);
void sunvnet_port_rm_txq_common(struct vnet_port *port);