/*
 * SUCS NET3:
 *
 * Generic datagram handling routines. These are generic for all
 * protocols. Possibly a generic IP version on top of these would
 * make sense. Not tonight however 8-).
 * This is used because UDP, RAW, PACKET, DDP, IPX, AX.25 and
 * NetROM layer all have identical poll code and mostly
 * identical recvmsg() code. So we share it here. The poll was
 * shared before but buried in udp.c so I moved it.
 *
 * Authors: Alan Cox <alan@lxorguk.ukuu.org.uk>. (datagram_poll() from old
 *          udp.c code)
 *
 * Fixes:
 *      Alan Cox        :       NULL return from skb_peek_copy()
 *                              understood
 *      Alan Cox        :       Rewrote skb_read_datagram to avoid the
 *                              skb_peek_copy stuff.
 *      Alan Cox        :       Added support for SOCK_SEQPACKET.
 *                              IPX can no longer use the SO_TYPE hack
 *                              but AX.25 now works right, and SPX is
 *                              feasible.
 *      Alan Cox        :       Fixed write poll of non IP protocol
 *                              crash.
 *      Florian La Roche:       Changed for my new skbuff handling.
 *      Darryl Miles    :       Fixed non-blocking SOCK_SEQPACKET.
 *      Linus Torvalds  :       BSD semantic fixes.
 *      Alan Cox        :       Datagram iovec handling
 *      Darryl Miles    :       Fixed non-blocking SOCK_STREAM.
 *      Alan Cox        :       POSIXisms
 *      Pete Wyckoff    :       Unconnected accept() fix.
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/uaccess.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/poll.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

#include <net/protocol.h>
#include <linux/skbuff.h>

#include <net/checksum.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <trace/events/skb.h>

/*
 * Is a socket 'connection oriented' ?
 */
static inline int connection_based(struct sock *sk)
{
        return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
}

static int receiver_wake_function(wait_queue_t *wait, unsigned int mode, int sync,
                                  void *key)
{
        unsigned long bits = (unsigned long)key;

        /*
         * Avoid a wakeup if event not interesting for us
         */
        if (bits && !(bits & (POLLIN | POLLERR)))
                return 0;
        return autoremove_wake_function(wait, mode, sync, key);
}
/*
 * Wait for the last received packet to be different from skb
 */
static int wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
                                 const struct sk_buff *skb)
{
        int error;
        DEFINE_WAIT_FUNC(wait, receiver_wake_function);

        prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

        /* Socket errors? */
        error = sock_error(sk);
        if (error)
                goto out_err;

        if (sk->sk_receive_queue.prev != skb)
                goto out;

        /* Socket shut down? */
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                goto out_noerr;

        /* Sequenced packets can come disconnected.
         * If so, we report the problem.
         */
        error = -ENOTCONN;
        if (connection_based(sk) &&
            !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
                goto out_err;

        /* handle signals */
        if (signal_pending(current))
                goto interrupted;

        error = 0;
        *timeo_p = schedule_timeout(*timeo_p);
out:
        finish_wait(sk_sleep(sk), &wait);
        return error;
interrupted:
        error = sock_intr_errno(*timeo_p);
out_err:
        *err = error;
        goto out;
out_noerr:
        *err = 0;
        error = 1;
        goto out;
}

/**
 * __skb_recv_datagram - Receive a datagram skbuff
 * @sk: socket
 * @flags: MSG_ flags
 * @peeked: returns non-zero if this packet has been seen before
 * @off: an offset in bytes to peek skb from. Returns an offset
 *       within an skb where data actually starts
 * @err: error code returned
 *
 * Get a datagram skbuff, understands the peeking, nonblocking wakeups
 * and possible races. This replaces identical code in packet, raw and
 * udp, as well as the IPX, AX.25 and Appletalk. It also finally fixes
 * the long standing peek and read race for datagram sockets. If you
 * alter this routine remember it must be re-entrant.
 *
 * This function does not take the socket lock; the receive queue is
 * protected by its own spinlock. A returned skb carries its own
 * reference, which the caller drops with skb_free_datagram().
 *
 * * It does not lock socket since today. This function is
 * * free of race conditions. This measure should/can improve
 * * significantly datagram socket latencies at high loads,
 * * when data copying to user space takes lots of time.
 * * (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 * * 8) Great win.)
 * *                          --ANK (980729)
 *
 * The order of the tests when we find no data waiting is specified
 * quite explicitly by POSIX 1003.1g; don't change them without having
 * the standard around, please.
 */
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
                                    int *peeked, int *off, int *err)
{
        struct sk_buff *skb, *last;
        long timeo;
        /*
         * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
         */
        int error = sock_error(sk);

        if (error)
                goto no_packet;

        timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

        do {
                /* Again only user level code calls this function, so nothing
                 * interrupt level will suddenly eat the receive_queue.
                 *
                 * Look at current nfs client by the way...
                 * However, this function was correct in any case. 8)
                 */
                unsigned long cpu_flags;
                struct sk_buff_head *queue = &sk->sk_receive_queue;
                int _off = *off;

                last = (struct sk_buff *)queue;
                spin_lock_irqsave(&queue->lock, cpu_flags);
                skb_queue_walk(queue, skb) {
                        last = skb;
                        *peeked = skb->peeked;
                        if (flags & MSG_PEEK) {
                                if (_off >= skb->len && (skb->len || _off ||
                                                         skb->peeked)) {
                                        _off -= skb->len;
                                        continue;
                                }
                                skb->peeked = 1;
                                atomic_inc(&skb->users);
                        } else
                                __skb_unlink(skb, queue);

                        spin_unlock_irqrestore(&queue->lock, cpu_flags);
                        *off = _off;
                        return skb;
                }
                spin_unlock_irqrestore(&queue->lock, cpu_flags);

                /* User doesn't want to wait */
                error = -EAGAIN;
                if (!timeo)
                        goto no_packet;

        } while (!wait_for_more_packets(sk, err, &timeo, last));

        return NULL;

no_packet:
        *err = error;
        return NULL;
}
EXPORT_SYMBOL(__skb_recv_datagram);
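
/*
 * Example (editor's sketch, not from the original file): peeking at a
 * datagram without dequeueing it, using the *off cursor. "sk" and the
 * surrounding error handling are assumed for illustration; udp.c drives
 * this through skb_recv_datagram() with MSG_PEEK instead.
 *
 *        int peeked, off = 0, err;
 *        struct sk_buff *skb;
 *
 *        skb = __skb_recv_datagram(sk, MSG_PEEK | MSG_DONTWAIT,
 *                                  &peeked, &off, &err);
 *        if (skb) {
 *                ... unread payload starts at skb->data + off, and the
 *                skb remains queued with an extra reference ...
 *                skb_free_datagram(sk, skb);
 *        }
 */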
224
225 struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags,
226 int noblock, int *err)
227 {
228 int peeked, off = 0;
229
230 return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
231 &peeked, &off, err);
232 }
233 EXPORT_SYMBOL(skb_recv_datagram);
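
/*
 * Example (editor's sketch, not from the original file): the canonical
 * recvmsg() core that this helper lets protocols share. "msg", "size"
 * and "flags" are assumed recvmsg() parameters; real callers such as
 * udp.c add address and truncation handling around this.
 *
 *        skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
 *        if (!skb)
 *                return err;
 *        copied = skb->len;
 *        if (copied > size)
 *                copied = size;
 *        err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
 *        skb_free_datagram(sk, skb);
 *        return err ? err : copied;
 */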

void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
{
        consume_skb(skb);
        sk_mem_reclaim_partial(sk);
}
EXPORT_SYMBOL(skb_free_datagram);

void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
{
        bool slow;

        if (likely(atomic_read(&skb->users) == 1))
                smp_rmb();
        else if (likely(!atomic_dec_and_test(&skb->users)))
                return;

        slow = lock_sock_fast(sk);
        skb_orphan(skb);
        sk_mem_reclaim_partial(sk);
        unlock_sock_fast(sk, slow);

        /* skb is now orphaned, can be freed outside of locked section */
        __kfree_skb(skb);
}
EXPORT_SYMBOL(skb_free_datagram_locked);

/**
 * skb_kill_datagram - Free a datagram skbuff forcibly
 * @sk: socket
 * @skb: datagram skbuff
 * @flags: MSG_ flags
 *
 * This function frees a datagram skbuff that was received by
 * skb_recv_datagram. The flags argument must match the one
 * used for skb_recv_datagram.
 *
 * If the MSG_PEEK flag is set, and the packet is still on the
 * receive queue of the socket, it will be taken off the queue
 * before it is freed.
 *
 * This function currently only disables BH when acquiring the
 * sk_receive_queue lock. Therefore it must not be used in a
 * context where that lock is acquired in an IRQ context.
 *
 * It returns 0 if the packet was removed by us.
 */

int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
{
        int err = 0;

        if (flags & MSG_PEEK) {
                err = -ENOENT;
                spin_lock_bh(&sk->sk_receive_queue.lock);
                if (skb == skb_peek(&sk->sk_receive_queue)) {
                        __skb_unlink(skb, &sk->sk_receive_queue);
                        atomic_dec(&skb->users);
                        err = 0;
                }
                spin_unlock_bh(&sk->sk_receive_queue.lock);
        }

        kfree_skb(skb);
        atomic_inc(&sk->sk_drops);
        sk_mem_reclaim_partial(sk);

        return err;
}
EXPORT_SYMBOL(skb_kill_datagram);
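
/*
 * Example (editor's sketch, not from the original file): dropping a
 * datagram whose copy to user space failed because of a bad checksum,
 * so a later read does not see the corrupt packet again. udp.c uses
 * this pattern; "sk", "skb" and "flags" are the recvmsg() locals
 * assumed here.
 *
 *        if (skb_copy_and_csum_datagram_iovec(skb, off, msg->msg_iov)) {
 *                if (!skb_kill_datagram(sk, skb, flags))
 *                        UDP_INC_STATS_USER(...);
 *                goto try_again;
 *        }
 */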

/**
 * skb_copy_datagram_iovec - Copy a datagram to an iovec.
 * @skb: buffer to copy
 * @offset: offset in the buffer to start copying from
 * @to: io vector to copy to
 * @len: amount of data to copy from buffer to iovec
 *
 * Note: the iovec is modified during the copy.
 */
int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
                            struct iovec *to, int len)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset;
        struct sk_buff *frag_iter;

        trace_skb_copy_datagram_iovec(skb, len);

        /* Copy header. */
        if (copy > 0) {
                if (copy > len)
                        copy = len;
                if (memcpy_toiovec(to, skb->data + offset, copy))
                        goto fault;
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
        }

        /* Copy paged appendix. Hmm... why does this look so complicated? */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                WARN_ON(start > offset + len);

                end = start + skb_frag_size(frag);
                if ((copy = end - offset) > 0) {
                        int err;
                        u8 *vaddr;
                        struct page *page = skb_frag_page(frag);

                        if (copy > len)
                                copy = len;
                        vaddr = kmap(page);
                        err = memcpy_toiovec(to, vaddr + frag->page_offset +
                                             offset - start, copy);
                        kunmap(page);
                        if (err)
                                goto fault;
                        if (!(len -= copy))
                                return 0;
                        offset += copy;
                }
                start = end;
        }

        skb_walk_frags(skb, frag_iter) {
                int end;

                WARN_ON(start > offset + len);

                end = start + frag_iter->len;
                if ((copy = end - offset) > 0) {
                        if (copy > len)
                                copy = len;
                        if (skb_copy_datagram_iovec(frag_iter,
                                                    offset - start,
                                                    to, copy))
                                goto fault;
                        if ((len -= copy) == 0)
                                return 0;
                        offset += copy;
                }
                start = end;
        }
        if (!len)
                return 0;

fault:
        return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_iovec);
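
/*
 * Editor's note (not from the original file): this helper and the ones
 * below walk the same three areas in order -- the linear header
 * (skb->data, length skb_headlen(skb)), the paged fragments in
 * skb_shinfo(skb)->frags[], and any chained skbs on the frag_list --
 * with "start"/"end" tracking where each area sits in the overall byte
 * stream so that "offset" may fall in any of them. A minimal caller,
 * with "msg" and "copied" assumed:
 *
 *        err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
 *                                      msg->msg_iov, copied);
 *        if (err)
 *                goto out_free;
 */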

/**
 * skb_copy_datagram_const_iovec - Copy a datagram to an iovec.
 * @skb: buffer to copy
 * @offset: offset in the buffer to start copying from
 * @to: io vector to copy to
 * @to_offset: offset in the io vector to start copying to
 * @len: amount of data to copy from buffer to iovec
 *
 * Returns 0 or -EFAULT.
 * Note: the iovec is not modified during the copy.
 */
int skb_copy_datagram_const_iovec(const struct sk_buff *skb, int offset,
                                  const struct iovec *to, int to_offset,
                                  int len)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset;
        struct sk_buff *frag_iter;

        /* Copy header. */
        if (copy > 0) {
                if (copy > len)
                        copy = len;
                if (memcpy_toiovecend(to, skb->data + offset, to_offset, copy))
                        goto fault;
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
                to_offset += copy;
        }

        /* Copy paged appendix. Hmm... why does this look so complicated? */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                WARN_ON(start > offset + len);

                end = start + skb_frag_size(frag);
                if ((copy = end - offset) > 0) {
                        int err;
                        u8 *vaddr;
                        struct page *page = skb_frag_page(frag);

                        if (copy > len)
                                copy = len;
                        vaddr = kmap(page);
                        err = memcpy_toiovecend(to, vaddr + frag->page_offset +
                                                offset - start, to_offset, copy);
                        kunmap(page);
                        if (err)
                                goto fault;
                        if (!(len -= copy))
                                return 0;
                        offset += copy;
                        to_offset += copy;
                }
                start = end;
        }

        skb_walk_frags(skb, frag_iter) {
                int end;

                WARN_ON(start > offset + len);

                end = start + frag_iter->len;
                if ((copy = end - offset) > 0) {
                        if (copy > len)
                                copy = len;
                        if (skb_copy_datagram_const_iovec(frag_iter,
                                                          offset - start,
                                                          to, to_offset,
                                                          copy))
                                goto fault;
                        if ((len -= copy) == 0)
                                return 0;
                        offset += copy;
                        to_offset += copy;
                }
                start = end;
        }
        if (!len)
                return 0;

fault:
        return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_const_iovec);
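
/*
 * Example (editor's sketch, not from the original file): because this
 * variant leaves the iovec untouched, a caller can place successive
 * skbs at explicit offsets in the same user buffer, in the style of
 * tun's read path; "iv" and "total" are assumed locals.
 *
 *        skb_copy_datagram_const_iovec(skb, 0, iv, total, skb->len);
 *        total += skb->len;
 */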

/**
 * skb_copy_datagram_from_iovec - Copy a datagram from an iovec.
 * @skb: buffer to copy
 * @offset: offset in the buffer to start copying to
 * @from: io vector to copy from
 * @from_offset: offset in the io vector to start copying from
 * @len: amount of data to copy to buffer from iovec
 *
 * Returns 0 or -EFAULT.
 * Note: the iovec is not modified during the copy.
 */
int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
                                 const struct iovec *from, int from_offset,
                                 int len)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset;
        struct sk_buff *frag_iter;

        /* Copy header. */
        if (copy > 0) {
                if (copy > len)
                        copy = len;
                if (memcpy_fromiovecend(skb->data + offset, from, from_offset,
                                        copy))
                        goto fault;
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
                from_offset += copy;
        }

        /* Copy paged appendix. Hmm... why does this look so complicated? */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                WARN_ON(start > offset + len);

                end = start + skb_frag_size(frag);
                if ((copy = end - offset) > 0) {
                        int err;
                        u8 *vaddr;
                        struct page *page = skb_frag_page(frag);

                        if (copy > len)
                                copy = len;
                        vaddr = kmap(page);
                        err = memcpy_fromiovecend(vaddr + frag->page_offset +
                                                  offset - start,
                                                  from, from_offset, copy);
                        kunmap(page);
                        if (err)
                                goto fault;

                        if (!(len -= copy))
                                return 0;
                        offset += copy;
                        from_offset += copy;
                }
                start = end;
        }

        skb_walk_frags(skb, frag_iter) {
                int end;

                WARN_ON(start > offset + len);

                end = start + frag_iter->len;
                if ((copy = end - offset) > 0) {
                        if (copy > len)
                                copy = len;
                        if (skb_copy_datagram_from_iovec(frag_iter,
                                                         offset - start,
                                                         from,
                                                         from_offset,
                                                         copy))
                                goto fault;
                        if ((len -= copy) == 0)
                                return 0;
                        offset += copy;
                        from_offset += copy;
                }
                start = end;
        }
        if (!len)
                return 0;

fault:
        return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_from_iovec);
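
/*
 * Example (editor's sketch, not from the original file): the send-path
 * counterpart of the copy-to-iovec helpers. af_packet fills a freshly
 * allocated skb from the user's iovec with this call; "skb", "msg" and
 * "len" are assumed locals.
 *
 *        err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, 0, len);
 *        if (err)
 *                goto out_free;
 */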

static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
                                      u8 __user *to, int len,
                                      __wsum *csump)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset;
        struct sk_buff *frag_iter;
        int pos = 0;

        /* Copy header. */
        if (copy > 0) {
                int err = 0;
                if (copy > len)
                        copy = len;
                *csump = csum_and_copy_to_user(skb->data + offset, to, copy,
                                               *csump, &err);
                if (err)
                        goto fault;
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
                to += copy;
                pos = copy;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                WARN_ON(start > offset + len);

                end = start + skb_frag_size(frag);
                if ((copy = end - offset) > 0) {
                        __wsum csum2;
                        int err = 0;
                        u8 *vaddr;
                        struct page *page = skb_frag_page(frag);

                        if (copy > len)
                                copy = len;
                        vaddr = kmap(page);
                        csum2 = csum_and_copy_to_user(vaddr +
                                                      frag->page_offset +
                                                      offset - start,
                                                      to, copy, 0, &err);
                        kunmap(page);
                        if (err)
                                goto fault;
                        *csump = csum_block_add(*csump, csum2, pos);
                        if (!(len -= copy))
                                return 0;
                        offset += copy;
                        to += copy;
                        pos += copy;
                }
                start = end;
        }

        skb_walk_frags(skb, frag_iter) {
                int end;

                WARN_ON(start > offset + len);

                end = start + frag_iter->len;
                if ((copy = end - offset) > 0) {
                        __wsum csum2 = 0;
                        if (copy > len)
                                copy = len;
                        if (skb_copy_and_csum_datagram(frag_iter,
                                                       offset - start,
                                                       to, copy,
                                                       &csum2))
                                goto fault;
                        *csump = csum_block_add(*csump, csum2, pos);
                        if ((len -= copy) == 0)
                                return 0;
                        offset += copy;
                        to += copy;
                        pos += copy;
                }
                start = end;
        }
        if (!len)
                return 0;

fault:
        return -EFAULT;
}

__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
{
        __sum16 sum;

        sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
        if (likely(!sum)) {
                if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
                        netdev_rx_csum_fault(skb->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        }
        return sum;
}
EXPORT_SYMBOL(__skb_checksum_complete_head);

__sum16 __skb_checksum_complete(struct sk_buff *skb)
{
        return __skb_checksum_complete_head(skb, skb->len);
}
EXPORT_SYMBOL(__skb_checksum_complete);
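
/*
 * Editor's note (not from the original file): most callers reach this
 * through the skb_checksum_complete() inline in skbuff.h, which skips
 * the full computation when the device already vouched for the packet,
 * roughly:
 *
 *        return skb_csum_unnecessary(skb) ? 0 : __skb_checksum_complete(skb);
 *
 * A nonzero result means the checksum failed and the packet should be
 * dropped.
 */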

/**
 * skb_copy_and_csum_datagram_iovec - Copy and checksum skb to user iovec.
 * @skb: skbuff
 * @hlen: hardware length
 * @iov: io vector
 *
 * Caller _must_ check that skb will fit to this iovec.
 *
 * Returns: 0       - success.
 *          -EINVAL - checksum failure.
 *          -EFAULT - fault during copy. Beware, in this case iovec
 *                    can be modified!
 */
int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
                                     int hlen, struct iovec *iov)
{
        __wsum csum;
        int chunk = skb->len - hlen;

        if (!chunk)
                return 0;

        /* Skip filled elements.
         * Pretty silly, look at memcpy_toiovec, though 8)
         */
        while (!iov->iov_len)
                iov++;

        if (iov->iov_len < chunk) {
                if (__skb_checksum_complete(skb))
                        goto csum_error;
                if (skb_copy_datagram_iovec(skb, hlen, iov, chunk))
                        goto fault;
        } else {
                csum = csum_partial(skb->data, hlen, skb->csum);
                if (skb_copy_and_csum_datagram(skb, hlen, iov->iov_base,
                                               chunk, &csum))
                        goto fault;
                if (csum_fold(csum))
                        goto csum_error;
                if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
                        netdev_rx_csum_fault(skb->dev);
                iov->iov_len -= chunk;
                iov->iov_base += chunk;
        }
        return 0;
csum_error:
        return -EINVAL;
fault:
        return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_and_csum_datagram_iovec);
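
/*
 * Example (editor's sketch, not from the original file): how a UDP-style
 * recvmsg() chooses between the plain copy and the checksumming copy;
 * "msg" and "copied" are assumed locals, and udp.c layers partial-
 * coverage handling on top of this.
 *
 *        if (skb_csum_unnecessary(skb))
 *                err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
 *                                              msg->msg_iov, copied);
 *        else
 *                err = skb_copy_and_csum_datagram_iovec(skb,
 *                                                       sizeof(struct udphdr),
 *                                                       msg->msg_iov);
 */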

/**
 * datagram_poll - generic datagram poll
 * @file: file struct
 * @sock: socket
 * @wait: poll table
 *
 * Datagram poll: Again totally generic. This also handles
 * sequenced packet sockets providing the socket receive queue
 * is only ever holding data ready to receive.
 *
 * Note: when you _don't_ use this routine for this protocol,
 * and you use a different write policy from sock_writeable()
 * then please supply your own write_space callback.
 */
unsigned int datagram_poll(struct file *file, struct socket *sock,
                           poll_table *wait)
{
        struct sock *sk = sock->sk;
        unsigned int mask;

        sock_poll_wait(file, sk_sleep(sk), wait);
        mask = 0;

        /* exceptional events? */
        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
                mask |= POLLERR |
                        (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);

        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLRDHUP | POLLIN | POLLRDNORM;
        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= POLLHUP;

        /* readable? */
        if (!skb_queue_empty(&sk->sk_receive_queue))
                mask |= POLLIN | POLLRDNORM;

        /* Connection-based need to check for termination and startup */
        if (connection_based(sk)) {
                if (sk->sk_state == TCP_CLOSE)
                        mask |= POLLHUP;
                /* connection hasn't started yet? */
                if (sk->sk_state == TCP_SYN_SENT)
                        return mask;
        }

        /* writable? */
        if (sock_writeable(sk))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
        else
                set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

        return mask;
}
EXPORT_SYMBOL(datagram_poll);
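
/*
 * Example (editor's sketch, not from the original file): datagram_poll is
 * designed to be plugged straight into a protocol's proto_ops, as AX.25,
 * NetROM and AppleTalk do; "PF_EXAMPLE" and the struct name below are
 * placeholders.
 *
 *        static const struct proto_ops example_dgram_ops = {
 *                .family = PF_EXAMPLE,
 *                ...
 *                .poll   = datagram_poll,
 *                ...
 *        };
 */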