return false;
}
-static struct sk_buff *bond_handle_frame(struct sk_buff *skb)
+static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
{
+ struct sk_buff *skb = *pskb;
struct slave *slave;
struct net_device *bond_dev;
struct bonding *bond;
- skb = skb_share_check(skb, GFP_ATOMIC);
- if (unlikely(!skb))
- return NULL;
-
slave = bond_slave_get_rcu(skb->dev);
bond_dev = ACCESS_ONCE(slave->dev->master);
if (unlikely(!bond_dev))
- return skb;
+ return RX_HANDLER_PASS;
+
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (unlikely(!skb))
+ return RX_HANDLER_CONSUMED;
+
+ *pskb = skb;
bond = netdev_priv(bond_dev);
slave->dev->last_rx = jiffies;
-	if (bond_should_deliver_exact_match(skb, slave, bond)) {
-		skb->deliver_no_wcard = 1;
-		return skb;
-	}
+	if (bond_should_deliver_exact_match(skb, slave, bond))
+		return RX_HANDLER_EXACT;
skb->dev = bond_dev;
if (unlikely(skb_cow_head(skb,
skb->data - skb_mac_header(skb)))) {
kfree_skb(skb);
- return NULL;
+ return RX_HANDLER_CONSUMED;
}
memcpy(eth_hdr(skb)->h_dest, bond_dev->dev_addr, ETH_ALEN);
}
- return skb;
+ return RX_HANDLER_ANOTHER;
}
/* enslave device <slave> to bond device <master> */
}
/* called under rcu_read_lock() from netif_receive_skb */
-static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
+static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
{
struct macvlan_port *port;
+ struct sk_buff *skb = *pskb;
const struct ethhdr *eth = eth_hdr(skb);
const struct macvlan_dev *vlan;
const struct macvlan_dev *src;
*/
macvlan_broadcast(skb, port, src->dev,
MACVLAN_MODE_VEPA);
- return skb;
+ return RX_HANDLER_PASS;
}
if (port->passthru)
else
vlan = macvlan_hash_lookup(port, eth->h_dest);
if (vlan == NULL)
- return skb;
+ return RX_HANDLER_PASS;
dev = vlan->dev;
if (unlikely(!(dev->flags & IFF_UP))) {
kfree_skb(skb);
- return NULL;
+ return RX_HANDLER_CONSUMED;
}
len = skb->len + ETH_HLEN;
skb = skb_share_check(skb, GFP_ATOMIC);
out:
macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, 0);
- return NULL;
+ return RX_HANDLER_CONSUMED;
}
static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
};
typedef enum gro_result gro_result_t;
-typedef struct sk_buff *rx_handler_func_t(struct sk_buff *skb);
+/*
+ * enum rx_handler_result - Possible return values for rx_handlers.
+ * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
+ * further.
+ * @RX_HANDLER_ANOTHER: Do another round in the receive path. This is
+ * indicated when skb->dev was changed by the rx_handler.
+ * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
+ * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
+ *
+ * rx_handlers are functions called from inside __netif_receive_skb(), to do
+ * special processing of the skb, prior to delivery to protocol handlers.
+ *
+ * Currently, a net_device can only have a single rx_handler registered. Trying
+ * to register a second rx_handler will return -EBUSY.
+ *
+ * To register a rx_handler on a net_device, use netdev_rx_handler_register().
+ * To unregister a rx_handler on a net_device, use
+ * netdev_rx_handler_unregister().
+ *
+ * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
+ * do with the skb.
+ *
+ * If the rx_handler consumed the skb in some way, it should return
+ * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
+ * the skb to be delivered in some other way.
+ *
+ * If the rx_handler changed skb->dev to divert the skb to another
+ * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
+ * new device will be called if one is registered.
+ *
+ * If the rx_handler wants the skb delivered only to protocol handlers
+ * registered on the exact device (ptype->dev == skb->dev), it should return
+ * RX_HANDLER_EXACT.
+ *
+ * If the rx_handler didn't change skb->dev but wants the skb to be normally
+ * delivered, it should return RX_HANDLER_PASS.
+ *
+ * A device without a registered rx_handler will behave as if rx_handler
+ * returned RX_HANDLER_PASS.
+ */
+
+enum rx_handler_result {
+ RX_HANDLER_CONSUMED,
+ RX_HANDLER_ANOTHER,
+ RX_HANDLER_EXACT,
+ RX_HANDLER_PASS,
+};
+typedef enum rx_handler_result rx_handler_result_t;
+typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
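
To make the new contract concrete, here is a minimal rx_handler sketch. This is an editorial illustration rather than part of the patch: the name example_handle_frame and the choice to carry the upper device in rx_handler_data are assumptions of the sketch. It follows the same unshare-then-divert pattern as the bonding handler above.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical rx_handler: divert every frame to an upper device that
 * was passed as rx_handler_data at registration time (an assumption of
 * this sketch, not something the patch mandates).
 */
static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *upper_dev;

	upper_dev = rcu_dereference(skb->dev->rx_handler_data);
	if (!upper_dev)
		return RX_HANDLER_PASS;		/* act as if no handler ran */

	/* We are about to modify the skb, so unshare it first. */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return RX_HANDLER_CONSUMED;	/* original skb already freed */

	*pskb = skb;		/* report the possibly-new skb to the caller */
	skb->dev = upper_dev;
	return RX_HANDLER_ANOTHER;	/* rerun receive path on upper_dev */
}

Note how the handler writes the possibly-reallocated skb back through *pskb before returning RX_HANDLER_ANOTHER, so that __netif_receive_skb() does its next round on the right buffer.
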
extern void __napi_schedule(struct napi_struct *n);
kmemcheck_bitfield_begin(flags2);
__u16 queue_mapping:16;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
- __u8 ndisc_nodetype:2,
- deliver_no_wcard:1;
-#else
- __u8 deliver_no_wcard:1;
+ __u8 ndisc_nodetype:2;
#endif
__u8 ooo_okay:1;
kmemcheck_bitfield_end(flags2);
- * Return NULL if skb is handled
+ * Return RX_HANDLER_CONSUMED if the skb was handled
* note: already called with rcu_read_lock
*/
-struct sk_buff *br_handle_frame(struct sk_buff *skb)
+rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
{
struct net_bridge_port *p;
+ struct sk_buff *skb = *pskb;
const unsigned char *dest = eth_hdr(skb)->h_dest;
br_should_route_hook_t *rhook;
if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
- return skb;
+ return RX_HANDLER_PASS;
if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
goto drop;
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb)
- return NULL;
+ return RX_HANDLER_CONSUMED;
p = br_port_get_rcu(skb->dev);
goto forward;
if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
- NULL, br_handle_local_finish))
- return NULL; /* frame consumed by filter */
- else
- return skb; /* continue processing */
+ NULL, br_handle_local_finish)) {
+ return RX_HANDLER_CONSUMED; /* consumed by filter */
+ } else {
+ *pskb = skb;
+ return RX_HANDLER_PASS; /* continue processing */
+ }
}
forward:
case BR_STATE_FORWARDING:
rhook = rcu_dereference(br_should_route_hook);
if (rhook) {
- if ((*rhook)(skb))
- return skb;
+ if ((*rhook)(skb)) {
+ *pskb = skb;
+ return RX_HANDLER_PASS;
+ }
dest = eth_hdr(skb)->h_dest;
}
/* fall through */
drop:
kfree_skb(skb);
}
- return NULL;
+ return RX_HANDLER_CONSUMED;
}
/* br_input.c */
extern int br_handle_frame_finish(struct sk_buff *skb);
-extern struct sk_buff *br_handle_frame(struct sk_buff *skb);
+extern rx_handler_result_t br_handle_frame(struct sk_buff **pskb);
/* br_ioctl.c */
extern int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
* on a failure.
*
* The caller must hold the rtnl_mutex.
+ *
+ * For a general description of rx_handler, see enum rx_handler_result.
*/
int netdev_rx_handler_register(struct net_device *dev,
rx_handler_func_t *rx_handler,
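
For completeness, a hedged sketch of pairing registration with unregistration (function and variable names are hypothetical, error handling is trimmed). Both calls require the rtnl lock, per the comment above.

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Hypothetical attach/detach pair for the example_handle_frame sketch
 * shown earlier; the caller must hold rtnl_mutex.
 */
static int example_attach(struct net_device *port_dev,
			  struct net_device *upper_dev)
{
	ASSERT_RTNL();

	/* Fails with -EBUSY if port_dev already has an rx_handler. */
	return netdev_rx_handler_register(port_dev, example_handle_frame,
					  upper_dev);
}

static void example_detach(struct net_device *port_dev)
{
	ASSERT_RTNL();
	netdev_rx_handler_unregister(port_dev);
}
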
rx_handler_func_t *rx_handler;
struct net_device *orig_dev;
struct net_device *null_or_dev;
+ bool deliver_exact = false;
int ret = NET_RX_DROP;
__be16 type;
rx_handler = rcu_dereference(skb->dev->rx_handler);
if (rx_handler) {
- struct net_device *prev_dev;
-
if (pt_prev) {
ret = deliver_skb(skb, pt_prev, orig_dev);
pt_prev = NULL;
}
- prev_dev = skb->dev;
- skb = rx_handler(skb);
- if (!skb)
+ switch (rx_handler(&skb)) {
+ case RX_HANDLER_CONSUMED:
goto out;
- if (skb->dev != prev_dev)
+ case RX_HANDLER_ANOTHER:
goto another_round;
+ case RX_HANDLER_EXACT:
+ deliver_exact = true;
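+ /* fall through */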
+ case RX_HANDLER_PASS:
+ break;
+ default:
+ BUG();
+ }
}
if (vlan_tx_tag_present(skb)) {
vlan_on_bond_hook(skb);
/* deliver only exact match when indicated */
- null_or_dev = skb->deliver_no_wcard ? skb->dev : NULL;
+ null_or_dev = deliver_exact ? skb->dev : NULL;
type = skb->protocol;
list_for_each_entry_rcu(ptype,
new->ip_summed = old->ip_summed;
skb_copy_queue_mapping(new, old);
new->priority = old->priority;
- new->deliver_no_wcard = old->deliver_no_wcard;
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
new->ipvs_property = old->ipvs_property;
#endif