/*
 *	Generic datagram handling routines. These are generic for all
 *	protocols. Possibly a generic IP version on top of these would
 *	make sense. Not tonight however 8-).
 *	This is used because UDP, RAW, PACKET, DDP, IPX, AX.25 and
 *	NetROM layer all have identical poll code and mostly
 *	identical recvmsg() code. So we share it here. The poll was
 *	shared before but buried in udp.c so I moved it.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>. (datagram_poll() from old
 *						     udp.c code)
 *
 *	Fixes:
 *		Alan Cox	:	NULL return from skb_peek_copy()
 *					understood
 *		Alan Cox	:	Rewrote skb_read_datagram to avoid the
 *					skb_peek_copy stuff.
 *		Alan Cox	:	Added support for SOCK_SEQPACKET.
 *					IPX can no longer use the SO_TYPE hack
 *					but AX.25 now works right, and SPX is
 *					feasible.
 *		Alan Cox	:	Fixed write poll of non IP protocol
 *					crash.
 *		Florian La Roche:	Changed for my new skbuff handling.
 *		Darryl Miles	:	Fixed non-blocking SOCK_SEQPACKET.
 *		Linus Torvalds	:	BSD semantic fixes.
 *		Alan Cox	:	Datagram iovec handling
 *		Darryl Miles	:	Fixed non-blocking SOCK_STREAM.
 *		Alan Cox	:	POSIXisms
 *		Pete Wyckoff    :       Unconnected accept() fix.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/uaccess.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/poll.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

#include <net/protocol.h>
#include <linux/skbuff.h>

#include <net/checksum.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <trace/events/skb.h>
/*
 *	Is a socket 'connection oriented' ?
 */
static inline int connection_based(struct sock *sk)
{
	return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
}
static int receiver_wake_function(wait_queue_t *wait, unsigned int mode,
				  int sync, void *key)
{
	unsigned long bits = (unsigned long)key;

	/*
	 * Avoid a wakeup if event not interesting for us
	 */
	if (bits && !(bits & (POLLIN | POLLERR)))
		return 0;
	return autoremove_wake_function(wait, mode, sync, key);
}
static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
{
	int error;
	DEFINE_WAIT_FUNC(wait, receiver_wake_function);

	prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

	/* Socket errors? */
	error = sock_error(sk);
	if (error)
		goto out_err;

	if (!skb_queue_empty(&sk->sk_receive_queue))
		goto out;

	/* Socket shut down? */
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		goto out_noerr;

	/* Sequenced packets can come disconnected.
	 * If so we report the problem
	 */
	error = -ENOTCONN;
	if (connection_based(sk) &&
	    !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
		goto out_err;

	/* handle signals */
	if (signal_pending(current))
		goto interrupted;

	error = 0;
	*timeo_p = schedule_timeout(*timeo_p);
out:
	finish_wait(sk_sleep(sk), &wait);
	return error;
interrupted:
	error = sock_intr_errno(*timeo_p);
out_err:
	*err = error;
	goto out;
out_noerr:
	*err = 0;
	error = 1;
	goto out;
}
/**
 *	__skb_recv_datagram - Receive a datagram skbuff
 *	@sk: socket
 *	@flags: MSG_ flags
 *	@off: an offset in bytes to peek skb from. Returns an offset
 *	      within an skb where data actually starts
 *	@peeked: returns non-zero if this packet has been seen before
 *	@err: error code returned
 *
 *	Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *	and possible races. This replaces identical code in packet, raw and
 *	udp, as well as the IPX, AX.25 and Appletalk. It also finally fixes
 *	the long standing peek and read race for datagram sockets. If you
 *	alter this routine remember it must be re-entrant.
 *
 *	This function will lock the socket if a skb is returned, so the caller
 *	needs to unlock the socket in that case (usually by calling
 *	skb_free_datagram)
 *
 *	* It does not lock socket since today. This function is
 *	* free of race conditions. This measure should/can improve
 *	* significantly datagram socket latencies at high loads,
 *	* when data copying to user space takes lots of time.
 *	* (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *	*  8) Great win.)
 *	*			                    --ANK (980729)
 *
 *	The order of the tests when we find no data waiting are specified
 *	quite explicitly by POSIX 1003.1g, don't change them without having
 *	the standard around please.
 */
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
				    int *peeked, int *off, int *err)
{
	struct sk_buff *skb;
	long timeo;
	/*
	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
	 */
	int error = sock_error(sk);

	if (error)
		goto no_packet;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		/* Again only user level code calls this function, so nothing
		 * interrupt level will suddenly eat the receive_queue.
		 *
		 * Look at current nfs client by the way...
		 * However, this function was correct in any case. 8)
		 */
		unsigned long cpu_flags;
		struct sk_buff_head *queue = &sk->sk_receive_queue;

		spin_lock_irqsave(&queue->lock, cpu_flags);
		skb_queue_walk(queue, skb) {
			*peeked = skb->peeked;
			if (flags & MSG_PEEK) {
				if (*off >= skb->len) {
					*off -= skb->len;
					continue;
				}
				skb->peeked = 1;
				atomic_inc(&skb->users);
			} else
				__skb_unlink(skb, queue);

			spin_unlock_irqrestore(&queue->lock, cpu_flags);
			return skb;
		}
		spin_unlock_irqrestore(&queue->lock, cpu_flags);

		/* User doesn't want to wait */
		error = -EAGAIN;
		if (!timeo)
			goto no_packet;

	} while (!wait_for_packet(sk, err, &timeo));

	return NULL;

no_packet:
	*err = error;
	return NULL;
}
EXPORT_SYMBOL(__skb_recv_datagram);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags,
				  int noblock, int *err)
{
	int peeked, off = 0;

	return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
				   &peeked, &off, err);
}
EXPORT_SYMBOL(skb_recv_datagram);
void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
{
	consume_skb(skb);
	sk_mem_reclaim_partial(sk);
}
EXPORT_SYMBOL(skb_free_datagram);
void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
{
	bool slow;

	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;

	slow = lock_sock_fast(sk);
	skb_orphan(skb);
	sk_mem_reclaim_partial(sk);
	unlock_sock_fast(sk, slow);

	/* skb is now orphaned, can be freed outside of locked section */
	__kfree_skb(skb);
}
EXPORT_SYMBOL(skb_free_datagram_locked);
/**
 *	skb_kill_datagram - Free a datagram skbuff forcibly
 *	@sk: socket
 *	@skb: datagram skbuff
 *	@flags: MSG_ flags
 *
 *	This function frees a datagram skbuff that was received by
 *	skb_recv_datagram.  The flags argument must match the one
 *	used for skb_recv_datagram.
 *
 *	If the MSG_PEEK flag is set, and the packet is still on the
 *	receive queue of the socket, it will be taken off the queue
 *	before it is freed.
 *
 *	This function currently only disables BH when acquiring the
 *	sk_receive_queue lock.  Therefore it must not be used in a
 *	context where that lock is acquired in an IRQ context.
 *
 *	It returns 0 if the packet was removed by us.
 */
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
{
	int err = 0;

	if (flags & MSG_PEEK) {
		err = -ENOENT;
		spin_lock_bh(&sk->sk_receive_queue.lock);
		if (skb == skb_peek(&sk->sk_receive_queue)) {
			__skb_unlink(skb, &sk->sk_receive_queue);
			atomic_dec(&skb->users);
			err = 0;
		}
		spin_unlock_bh(&sk->sk_receive_queue.lock);
	}

	kfree_skb(skb);
	atomic_inc(&sk->sk_drops);
	sk_mem_reclaim_partial(sk);

	return err;
}
EXPORT_SYMBOL(skb_kill_datagram);
/**
 *	skb_copy_datagram_iovec - Copy a datagram to an iovec.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: io vector to copy to
 *	@len: amount of data to copy from buffer to iovec
 *
 *	Note: the iovec is modified during the copy.
 */
int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
			    struct iovec *to, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	trace_skb_copy_datagram_iovec(skb, len);

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_toiovec(to, skb->data + offset, copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			int err;
			u8  *vaddr;
			struct page *page = skb_frag_page(frag);

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_toiovec(to, vaddr + frag->page_offset +
					     offset - start, copy);
			kunmap(page);
			if (err)
				goto fault;
			if (!(len -= copy))
				return 0;
			offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_iovec(frag_iter,
						    offset - start,
						    to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_iovec);
/**
 *	skb_copy_datagram_const_iovec - Copy a datagram to an iovec.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: io vector to copy to
 *	@to_offset: offset in the io vector to start copying to
 *	@len: amount of data to copy from buffer to iovec
 *
 *	Returns 0 or -EFAULT.
 *	Note: the iovec is not modified during the copy.
 */
int skb_copy_datagram_const_iovec(const struct sk_buff *skb, int offset,
				  const struct iovec *to, int to_offset,
				  int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_toiovecend(to, skb->data + offset, to_offset, copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to_offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			int err;
			u8  *vaddr;
			struct page *page = skb_frag_page(frag);

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_toiovecend(to, vaddr + frag->page_offset +
						offset - start, to_offset, copy);
			kunmap(page);
			if (err)
				goto fault;
			if (!(len -= copy))
				return 0;
			offset += copy;
			to_offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_const_iovec(frag_iter,
							  offset - start,
							  to, to_offset,
							  copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to_offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_const_iovec);
/**
 *	skb_copy_datagram_from_iovec - Copy a datagram from an iovec.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying to
 *	@from: io vector to copy from
 *	@from_offset: offset in the io vector to start copying from
 *	@len: amount of data to copy to buffer from iovec
 *
 *	Returns 0 or -EFAULT.
 *	Note: the iovec is not modified during the copy.
 */
int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
				 const struct iovec *from, int from_offset,
				 int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_fromiovecend(skb->data + offset, from, from_offset,
					copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from_offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			int err;
			u8  *vaddr;
			struct page *page = skb_frag_page(frag);

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_fromiovecend(vaddr + frag->page_offset +
						  offset - start,
						  from, from_offset, copy);
			kunmap(page);
			if (err)
				goto fault;

			if (!(len -= copy))
				return 0;
			offset += copy;
			from_offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_from_iovec(frag_iter,
							 offset - start,
							 from,
							 from_offset,
							 copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from_offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_from_iovec);
static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
				      u8 __user *to, int len,
				      __wsum *csump)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Copy header. */
	if (copy > 0) {
		int err = 0;
		if (copy > len)
			copy = len;
		*csump = csum_and_copy_to_user(skb->data + offset, to, copy,
					       *csump, &err);
		if (err)
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			int err = 0;
			u8  *vaddr;
			struct page *page = skb_frag_page(frag);

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			csum2 = csum_and_copy_to_user(vaddr +
							frag->page_offset +
							offset - start,
						      to, copy, 0, &err);
			kunmap(page);
			if (err)
				goto fault;
			*csump = csum_block_add(*csump, csum2, pos);
			if (!(len -= copy))
				return 0;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			__wsum csum2 = 0;
			if (copy > len)
				copy = len;
			if (skb_copy_and_csum_datagram(frag_iter,
						       offset - start,
						       to, copy,
						       &csum2))
				goto fault;
			*csump = csum_block_add(*csump, csum2, pos);
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
{
	__sum16 sum;

	sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
			netdev_rx_csum_fault(skb->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	return sum;
}
EXPORT_SYMBOL(__skb_checksum_complete_head);

__sum16 __skb_checksum_complete(struct sk_buff *skb)
{
	return __skb_checksum_complete_head(skb, skb->len);
}
EXPORT_SYMBOL(__skb_checksum_complete);
/**
 *	skb_copy_and_csum_datagram_iovec - Copy and checksum skb to user iovec.
 *	@skb: skbuff
 *	@hlen: hardware length
 *	@iov: io vector
 *
 *	Caller _must_ check that skb will fit to this iovec.
 *
 *	Returns: 0       - success.
 *		 -EINVAL - checksum failure.
 *		 -EFAULT - fault during copy. Beware, in this case iovec
 *			   can be modified!
 */
int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
				     int hlen, struct iovec *iov)
{
	__wsum csum;
	int chunk = skb->len - hlen;

	if (!chunk)
		return 0;

	/* Skip filled elements.
	 * Pretty silly, look at memcpy_toiovec, though 8)
	 */
	while (!iov->iov_len)
		iov++;

	if (iov->iov_len < chunk) {
		if (__skb_checksum_complete(skb))
			goto csum_error;
		if (skb_copy_datagram_iovec(skb, hlen, iov, chunk))
			goto fault;
	} else {
		csum = csum_partial(skb->data, hlen, skb->csum);
		if (skb_copy_and_csum_datagram(skb, hlen, iov->iov_base,
					       chunk, &csum))
			goto fault;
		if (csum_fold(csum))
			goto csum_error;
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
			netdev_rx_csum_fault(skb->dev);
		iov->iov_len -= chunk;
		iov->iov_base += chunk;
	}
	return 0;
csum_error:
	return -EINVAL;
fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_and_csum_datagram_iovec);
/**
 *	datagram_poll - generic datagram poll
 *	@file: file struct
 *	@sock: socket
 *	@wait: poll table
 *
 *	Datagram poll: Again totally generic. This also handles
 *	sequenced packet sockets providing the socket receive queue
 *	is only ever holding data ready to receive.
 *
 *	Note: when you _don't_ use this routine for this protocol,
 *	and you use a different write policy from sock_writeable()
 *	then please supply your own write_space callback.
 */
unsigned int datagram_poll(struct file *file, struct socket *sock,
			   poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (connection_based(sk)) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= POLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* writable? */
	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}
EXPORT_SYMBOL(datagram_poll);