 		sk_stream_mem_schedule(sk, skb->truesize, 1);
 }
+static inline int sk_stream_wmem_schedule(struct sock *sk, int size)
+{
+	return size <= sk->sk_forward_alloc ||
+	       sk_stream_mem_schedule(sk, size, 0);
+}
+
 /* Used by processes to "lock" a socket state, so that
  * interrupts and bottom half handlers won't change it
  * from under us. It essentially blocks any incoming
 	skb = alloc_skb_fclone(size + hdr_len, gfp);
 	if (skb) {
 		skb->truesize += mem;
-		if (sk->sk_forward_alloc >= (int)skb->truesize ||
-		    sk_stream_mem_schedule(sk, skb->truesize, 0)) {
+		if (sk_stream_wmem_schedule(sk, skb->truesize)) {
 			skb_reserve(skb, hdr_len);
 			return skb;
 		}
 {
 	struct page *page = NULL;
-	if (sk->sk_forward_alloc >= (int)PAGE_SIZE ||
-	    sk_stream_mem_schedule(sk, PAGE_SIZE, 0))
+	if (sk_stream_wmem_schedule(sk, PAGE_SIZE))
 		page = alloc_pages(sk->sk_allocation, 0);
 	else {
 		sk->sk_prot->enter_memory_pressure();
 			tcp_mark_push(tp, skb);
 			goto new_segment;
 		}
-		if (sk->sk_forward_alloc < copy &&
-		    !sk_stream_mem_schedule(sk, copy, 0))
+		if (!sk_stream_wmem_schedule(sk, copy))
 			goto wait_for_memory;
 		if (can_coalesce) {