and CONFIG_NET_DMA is enabled.
Default: 4096
+UDP variables:
+
+udp_mem - vector of 3 INTEGERs: min, pressure, max
+ Number of pages allowed for queueing by all UDP sockets.
+
+ min: Below this number of pages, UDP is not bothered about its
+ memory appetite. Once the amount of memory allocated by UDP exceeds
+ this number, UDP starts to moderate its memory usage.
+
+ pressure: This value was introduced to follow the format of tcp_mem.
+
+ max: Number of pages allowed for queueing by all UDP sockets.
+
+ The default is calculated at boot time from the amount of available memory.
+
+udp_rmem_min - INTEGER
+ Minimal size, in bytes, of the receive buffer used by UDP sockets
+ under memory moderation. Each UDP socket may use this much for
+ receiving data even if the total pages used by all UDP sockets
+ exceed udp_mem pressure.
+ Default: 4096
+
+udp_wmem_min - INTEGER
+ Minimal size, in bytes, of the send buffer used by UDP sockets
+ under memory moderation. Each UDP socket may use this much for
+ sending data even if the total pages used by all UDP sockets
+ exceed udp_mem pressure.
+ Default: 4096
+
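For reference, a minimal user-space sketch (not part of the patch; dump() is a hypothetical helper) that prints the three new sysctls once the patch is applied. udp_mem reads back as three page counts, the *_min values as bytes:

	#include <stdio.h>

	/* print one sysctl file verbatim */
	static void dump(const char *path)
	{
		char buf[128];
		FILE *f = fopen(path, "r");

		if (f) {
			if (fgets(buf, sizeof(buf), f))
				printf("%s: %s", path, buf);
			fclose(f);
		}
	}

	int main(void)
	{
		dump("/proc/sys/net/ipv4/udp_mem");
		dump("/proc/sys/net/ipv4/udp_rmem_min");
		dump("/proc/sys/net/ipv4/udp_wmem_min");
		return 0;
	}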
CIPSOv4 Variables:
cipso_cache_enable - BOOLEAN
extern struct proto udp_prot;
+extern atomic_t udp_memory_allocated;
+
+/* sysctl variables for udp */
+extern int sysctl_udp_mem[3];
+extern int sysctl_udp_rmem_min;
+extern int sysctl_udp_wmem_min;
+
struct sk_buff;
/*
extern int udp4_proc_init(void);
extern void udp4_proc_exit(void);
#endif
+
+extern void udp_init(void);
#endif /* _UDP_H */
__skb_queue_purge(&sk->sk_receive_queue);
__skb_queue_purge(&sk->sk_error_queue);
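+ /* hand this socket's cached forward-alloc pages back to the protocol */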
+ sk_mem_reclaim(sk);
+
if (sk->sk_type == SOCK_STREAM && sk->sk_state != TCP_CLOSE) {
printk("Attempt to release TCP socket in state %d %p\n",
sk->sk_state, sk);
/* Setup TCP slab cache for open requests. */
tcp_init();
+ /* Setup UDP memory thresholds */
+ udp_init();
+
/* Add UDP-Lite (RFC 3828) */
udplite4_register();
sock_prot_inuse(&tcp_prot), atomic_read(&tcp_orphan_count),
tcp_death_row.tw_count, atomic_read(&tcp_sockets_allocated),
atomic_read(&tcp_memory_allocated));
- seq_printf(seq, "UDP: inuse %d\n", sock_prot_inuse(&udp_prot));
+ seq_printf(seq, "UDP: inuse %d mem %d\n", sock_prot_inuse(&udp_prot),
+ atomic_read(&udp_memory_allocated));
seq_printf(seq, "UDPLITE: inuse %d\n", sock_prot_inuse(&udplite_prot));
seq_printf(seq, "RAW: inuse %d\n", sock_prot_inuse(&raw_prot));
seq_printf(seq, "FRAG: inuse %d memory %d\n",
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp.h>
+#include <net/udp.h>
#include <net/cipso_ipv4.h>
#include <net/inet_frag.h>
.mode = 0644,
.proc_handler = &proc_dointvec,
},
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "udp_mem",
+ .data = &sysctl_udp_mem,
+ .maxlen = sizeof(sysctl_udp_mem),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &zero
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "udp_rmem_min",
+ .data = &sysctl_udp_rmem_min,
+ .maxlen = sizeof(sysctl_udp_rmem_min),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &zero
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "udp_wmem_min",
+ .data = &sysctl_udp_wmem_min,
+ .maxlen = sizeof(sysctl_udp_wmem_min),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &zero
+ },
{ .ctl_name = 0 }
};
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
+#include <linux/bootmem.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
struct hlist_head udp_hash[UDP_HTABLE_SIZE];
DEFINE_RWLOCK(udp_hash_lock);
+int sysctl_udp_mem[3] __read_mostly;
+int sysctl_udp_rmem_min __read_mostly;
+int sysctl_udp_wmem_min __read_mostly;
+
+EXPORT_SYMBOL(sysctl_udp_mem);
+EXPORT_SYMBOL(sysctl_udp_rmem_min);
+EXPORT_SYMBOL(sysctl_udp_wmem_min);
+
+atomic_t udp_memory_allocated;
+EXPORT_SYMBOL(udp_memory_allocated);
+
static inline int __udp_lib_lport_inuse(__u16 num,
const struct hlist_head udptable[])
{
err = ulen;
out_free:
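+ /* the socket lock serializes sk_forward_alloc updates made by skb destructors */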
+ lock_sock(sk);
skb_free_datagram(sk, skb);
+ release_sock(sk);
out:
return err;
csum_copy_err:
+ lock_sock(sk);
if (!skb_kill_datagram(sk, skb, flags))
UDP_INC_STATS_USER(UDP_MIB_INERRORS, is_udplite);
+ release_sock(sk);
if (noblock)
return -EAGAIN;
skb1 = skb_clone(skb, GFP_ATOMIC);
if (skb1) {
- int ret = udp_queue_rcv_skb(sk, skb1);
+ int ret = 0;
+
+ bh_lock_sock_nested(sk);
+ if (!sock_owned_by_user(sk))
+ ret = udp_queue_rcv_skb(sk, skb1);
+ else
+ sk_add_backlog(sk, skb1);
+ bh_unlock_sock(sk);
+
if (ret > 0)
/* we should probably re-process instead
* of dropping packets here. */
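The locking added above is the pattern this patch applies to every UDP receive path (the unicast and IPv6 hunks below repeat it): deliver directly only when no process owns the socket, otherwise defer to the backlog. A simplified kernel-context sketch, where receive_or_backlog() and deliver() are hypothetical stand-ins for the open-coded logic and udp_queue_rcv_skb():

	#include <net/sock.h>

	static int receive_or_backlog(struct sock *sk, struct sk_buff *skb,
				      int (*deliver)(struct sock *,
						     struct sk_buff *))
	{
		int ret = 0;

		bh_lock_sock_nested(sk);
		if (!sock_owned_by_user(sk))
			/* no process holds the socket lock: deliver now */
			ret = deliver(sk, skb);
		else
			/* the lock owner may be updating sk_forward_alloc;
			 * queue to the backlog, which release_sock() drains
			 * through sk->sk_backlog_rcv (udp_queue_rcv_skb) */
			sk_add_backlog(sk, skb);
		bh_unlock_sock(sk);

		return ret;
	}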
inet_iif(skb), udptable);
if (sk != NULL) {
- int ret = udp_queue_rcv_skb(sk, skb);
+ int ret = 0;
+ bh_lock_sock_nested(sk);
+ if (!sock_owned_by_user(sk))
+ ret = udp_queue_rcv_skb(sk, skb);
+ else
+ sk_add_backlog(sk, skb);
+ bh_unlock_sock(sk);
sock_put(sk);
/* a return value > 0 means to resubmit the input, but
.hash = udp_lib_hash,
.unhash = udp_lib_unhash,
.get_port = udp_v4_get_port,
+ .memory_allocated = &udp_memory_allocated,
+ .sysctl_mem = sysctl_udp_mem,
+ .sysctl_wmem = &sysctl_udp_wmem_min,
+ .sysctl_rmem = &sysctl_udp_rmem_min,
.obj_size = sizeof(struct udp_sock),
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_udp_setsockopt,
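Wiring these four fields into struct proto is what makes the generic accounting in net/core/sock.c apply to UDP. Roughly (sketch_mem_schedule() below is a simplified, hypothetical rendering of __sk_mem_schedule(); the real function also handles the pressure band and per-socket minimums):

	#include <net/sock.h>

	static int sketch_mem_schedule(struct sock *sk, int size)
	{
		struct proto *prot = sk->sk_prot;
		int pages = (size + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
		int allocated;

		sk->sk_forward_alloc += pages << SK_MEM_QUANTUM_SHIFT;
		allocated = atomic_add_return(pages, prot->memory_allocated);

		if (allocated <= prot->sysctl_mem[0])
			return 1;	/* under min: always succeed */
		if (allocated > prot->sysctl_mem[2]) {
			/* over max: undo the charge and fail */
			sk->sk_forward_alloc -= pages << SK_MEM_QUANTUM_SHIFT;
			atomic_sub(pages, prot->memory_allocated);
			return 0;
		}
		return 1;	/* between min and max: allowed here */
	}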
}
#endif /* CONFIG_PROC_FS */
+void __init udp_init(void)
+{
+ unsigned long limit;
+
+ /* Set the pressure threshold using the same strategy as TCP: a
+ * fraction of total memory that reaches 1/2 at 256 MB, decreasing
+ * toward zero as total memory shrinks, with a floor of 128 pages.
+ */
+ limit = min(nr_all_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
+ limit = (limit * (nr_all_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
+ limit = max(limit, 128UL);
+ sysctl_udp_mem[0] = limit / 4 * 3;
+ sysctl_udp_mem[1] = limit;
+ sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;
+
+ sysctl_udp_rmem_min = SK_MEM_QUANTUM;
+ sysctl_udp_wmem_min = SK_MEM_QUANTUM;
+}
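A worked instance of the arithmetic above, assuming PAGE_SHIFT = 12 (4 KB pages) on a 256 MB machine, i.e. nr_all_pages = 65536:

	limit = min(65536, 1 << 16) >> 8    = 256
	limit = (256 * (65536 >> 8)) >> 1   = 32768 pages  (128 MB)
	udp_mem = { 24576, 32768, 49152 }   (96 MB, 128 MB, 192 MB)

So the moderation threshold lands at half of physical memory, with the hard limit at twice the min.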
+
EXPORT_SYMBOL(udp_disconnect);
EXPORT_SYMBOL(udp_hash);
EXPORT_SYMBOL(udp_hash_lock);
err = ulen;
out_free:
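+ /* as on the IPv4 side: take the socket lock around destructor accounting */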
+ lock_sock(sk);
skb_free_datagram(sk, skb);
+ release_sock(sk);
out:
return err;
csum_copy_err:
+ lock_sock(sk);
if (!skb_kill_datagram(sk, skb, flags))
UDP6_INC_STATS_USER(UDP_MIB_INERRORS, is_udplite);
+ release_sock(sk);
if (flags & MSG_DONTWAIT)
return -EAGAIN;
while ((sk2 = udp_v6_mcast_next(sk_next(sk2), uh->dest, daddr,
uh->source, saddr, dif))) {
struct sk_buff *buff = skb_clone(skb, GFP_ATOMIC);
- if (buff)
- udpv6_queue_rcv_skb(sk2, buff);
+ if (buff) {
+ bh_lock_sock_nested(sk2);
+ if (!sock_owned_by_user(sk2))
+ udpv6_queue_rcv_skb(sk2, buff);
+ else
+ sk_add_backlog(sk2, buff);
+ bh_unlock_sock(sk2);
+ }
}
- udpv6_queue_rcv_skb(sk, skb);
+ bh_lock_sock_nested(sk);
+ if (!sock_owned_by_user(sk))
+ udpv6_queue_rcv_skb(sk, skb);
+ else
+ sk_add_backlog(sk, skb);
+ bh_unlock_sock(sk);
out:
read_unlock(&udp_hash_lock);
return 0;
/* deliver */
- udpv6_queue_rcv_skb(sk, skb);
+ bh_lock_sock_nested(sk);
+ if (!sock_owned_by_user(sk))
+ udpv6_queue_rcv_skb(sk, skb);
+ else
+ sk_add_backlog(sk, skb);
+ bh_unlock_sock(sk);
sock_put(sk);
return 0;
.hash = udp_lib_hash,
.unhash = udp_lib_unhash,
.get_port = udp_v6_get_port,
+ .memory_allocated = &udp_memory_allocated,
+ .sysctl_mem = sysctl_udp_mem,
+ .sysctl_wmem = &sysctl_udp_wmem_min,
+ .sysctl_rmem = &sysctl_udp_rmem_min,
.obj_size = sizeof(struct udp6_sock),
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_udpv6_setsockopt,