int hlen,
struct iovec *iov);
extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
+extern void skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
+ unsigned int flags);
extern unsigned int skb_checksum(const struct sk_buff *skb, int offset,
int len, unsigned int csum);
extern int skb_copy_bits(const struct sk_buff *skb, int offset,
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
#include <linux/rtnetlink.h>
#include <linux/poll.h>
#include <linux/highmem.h>
+#include <linux/spinlock.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
kfree_skb(skb);
}
+/**
+ * skb_kill_datagram - Free a datagram skbuff forcibly
+ * @sk: socket
+ * @skb: datagram skbuff
+ * @flags: MSG_ flags
+ *
+ * This function frees a datagram skbuff that was received by
+ * skb_recv_datagram. The flags argument must match the one
+ * used for skb_recv_datagram.
+ *
+ * If the MSG_PEEK flag is set, and the packet is still on the
+ * receive queue of the socket, it will be taken off the queue
+ * before it is freed.
+ *
+ * This function currently only disables BH when acquiring the
+ * sk_receive_queue lock, so it must not be called if that lock
+ * can also be taken from hard IRQ context.
+ */
+void skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
+{
+ if (flags & MSG_PEEK) {
+ spin_lock_bh(&sk->sk_receive_queue.lock);
+ if (skb == skb_peek(&sk->sk_receive_queue)) {
+ __skb_unlink(skb, &sk->sk_receive_queue);
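+ /* Drop the reference held by sk_receive_queue; it cannot
+ * hit zero here because skb_recv_datagram took an extra
+ * reference for the caller in the MSG_PEEK case. kfree_skb
+ * below drops that caller reference.
+ */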
+ atomic_dec(&skb->users);
+ }
+ spin_unlock_bh(&sk->sk_receive_queue.lock);
+ }
+
+ kfree_skb(skb);
+}
+
+EXPORT_SYMBOL(skb_kill_datagram);
+
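For illustration only, not part of the patch: a minimal sketch of the caller
pattern skb_kill_datagram is meant for, modelled on the udp_recvmsg() error
path converted below. my_recvmsg_sketch() and my_csum_ok() are hypothetical
names; error handling is abbreviated.

	#include <linux/errno.h>
	#include <linux/skbuff.h>
	#include <net/sock.h>

	/* Hypothetical stand-in for a protocol's checksum check. */
	static int my_csum_ok(const struct sk_buff *skb);

	static int my_recvmsg_sketch(struct sock *sk, unsigned int flags,
				     int noblock)
	{
		struct sk_buff *skb;
		int err;

		skb = skb_recv_datagram(sk, flags, noblock, &err);
		if (!skb)
			return err;

		if (!my_csum_ok(skb)) {
			/* Pass the same flags as skb_recv_datagram: with
			 * MSG_PEEK the skb is still on sk_receive_queue
			 * and must be unlinked before it is freed.
			 */
			skb_kill_datagram(sk, skb, flags);
			return noblock ? -EAGAIN : -EHOSTUNREACH;
		}

		/* ... copy the payload to user space here ... */

		skb_free_datagram(sk, skb);
		return 0;
	}
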
/**
* skb_copy_datagram_iovec - Copy a datagram to an iovec.
* @skb: buffer to copy
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
csum_copy_err:
UDP_INC_STATS_BH(UDP_MIB_INERRORS);
- /* Clear queue. */
- if (flags&MSG_PEEK) {
- int clear = 0;
- spin_lock_bh(&sk->sk_receive_queue.lock);
- if (skb == skb_peek(&sk->sk_receive_queue)) {
- __skb_unlink(skb, &sk->sk_receive_queue);
- clear = 1;
- }
- spin_unlock_bh(&sk->sk_receive_queue.lock);
- if (clear)
- kfree_skb(skb);
- }
-
- skb_free_datagram(sk, skb);
+ skb_kill_datagram(sk, skb, flags);
if (noblock)
return -EAGAIN;
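
The conversion above preserves behaviour in the MSG_PEEK case. As a sketch of
the reference accounting (not part of the patch): a peeked skb carries two
references, one held by sk_receive_queue and one taken for the caller by
skb_recv_datagram.

	/* Old error path, MSG_PEEK set and skb still queued:
	 *   __skb_unlink()       unlink from sk_receive_queue
	 *   kfree_skb(skb)       drop the queue's reference (the "clear" branch)
	 *   skb_free_datagram()  drop the caller's reference
	 *
	 * New path inside skb_kill_datagram():
	 *   __skb_unlink()            unlink from sk_receive_queue
	 *   atomic_dec(&skb->users)   drop the queue's reference; cannot
	 *                             reach zero while the caller holds one
	 *   kfree_skb(skb)            drop the caller's reference
	 */
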
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
#include <linux/icmpv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
+#include <linux/skbuff.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/bug.h>
return err;
csum_copy_err:
- /* Clear queue. */
- if (flags&MSG_PEEK) {
- int clear = 0;
- spin_lock_bh(&sk->sk_receive_queue.lock);
- if (skb == skb_peek(&sk->sk_receive_queue)) {
- __skb_unlink(skb, &sk->sk_receive_queue);
- clear = 1;
- }
- spin_unlock_bh(&sk->sk_receive_queue.lock);
- if (clear)
- kfree_skb(skb);
- }
+ skb_kill_datagram(sk, skb, flags);
/* Error for blocking case is chosen to masquerade
as some normal condition.
*/
err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
/* FIXME: increment a raw6 drops counter here */
- goto out_free;
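+ /* skb_kill_datagram has already freed the skb; the out_free
+ * label would free it a second time, so jump past it.
+ */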
+ goto out;
}
static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl,
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
+#include <linux/skbuff.h>
#include <asm/uaccess.h>
#include <net/sock.h>
return err;
csum_copy_err:
- /* Clear queue. */
- if (flags&MSG_PEEK) {
- int clear = 0;
- spin_lock_bh(&sk->sk_receive_queue.lock);
- if (skb == skb_peek(&sk->sk_receive_queue)) {
- __skb_unlink(skb, &sk->sk_receive_queue);
- clear = 1;
- }
- spin_unlock_bh(&sk->sk_receive_queue.lock);
- if (clear)
- kfree_skb(skb);
- }
-
- skb_free_datagram(sk, skb);
+ skb_kill_datagram(sk, skb, flags);
if (flags & MSG_DONTWAIT) {
UDP6_INC_STATS_USER(UDP_MIB_INERRORS);