[AF_PACKET]: Convert PACKET_MMAP over to vm_insert_page().
author: David S. Miller <davem@sunset.davemloft.net>
Wed, 7 Dec 2005 00:38:35 +0000 (16:38 -0800)
committer: David S. Miller <davem@sunset.davemloft.net>
Wed, 7 Dec 2005 00:38:35 +0000 (16:38 -0800)
So we can properly use __GFP_COMP and avoid the use of
PG_reserved pages.

With extremely helpful review from Hugh Dickins.

Signed-off-by: David S. Miller <davem@davemloft.net>
net/packet/af_packet.c

index 499ae3df4a440b42c0765d1e691bb647c2230050..3e2462760413ee5871cbd3061f57be83df6c1aa1 100644 (file)
@@ -1587,23 +1587,47 @@ static inline struct page *pg_vec_endpage(char *one_pg_vec, unsigned int order)
        return virt_to_page(one_pg_vec + (PAGE_SIZE << order) - 1);
 }
 
-static void free_pg_vec(char **pg_vec, unsigned order, unsigned len)
+static void free_pg_vec(char **pg_vec, unsigned int order, unsigned int len)
 {
        int i;
 
-       for (i=0; i<len; i++) {
-               if (pg_vec[i]) {
-                       struct page *page, *pend;
-
-                       pend = pg_vec_endpage(pg_vec[i], order);
-                       for (page = virt_to_page(pg_vec[i]); page <= pend; page++)
-                               ClearPageReserved(page);
-                       free_pages((unsigned long)pg_vec[i], order);
-               }
+       for (i = 0; i < len; i++) {
+               if (likely(pg_vec[i]))
+                       free_pages((unsigned long) pg_vec[i], order);
        }
        kfree(pg_vec);
 }
 
+static inline char *alloc_one_pg_vec_page(unsigned long order)
+{
+       return (char *) __get_free_pages(GFP_KERNEL | __GFP_COMP | __GFP_ZERO,
+                                        order);
+}
+
+static char **alloc_pg_vec(struct tpacket_req *req, int order)
+{
+       unsigned int block_nr = req->tp_block_nr;
+       char **pg_vec;
+       int i;
+
+       pg_vec = kzalloc(block_nr * sizeof(char *), GFP_KERNEL);
+       if (unlikely(!pg_vec))
+               goto out;
+
+       for (i = 0; i < block_nr; i++) {
+               pg_vec[i] = alloc_one_pg_vec_page(order);
+               if (unlikely(!pg_vec[i]))
+                       goto out_free_pgvec;
+       }
+
+out:
+       return pg_vec;
+
+out_free_pgvec:
+       free_pg_vec(pg_vec, order, block_nr);
+       pg_vec = NULL;
+       goto out;
+}
 
 static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing)
 {
@@ -1617,64 +1641,46 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
 
                /* Sanity tests and some calculations */
 
-               if (po->pg_vec)
+               if (unlikely(po->pg_vec))
                        return -EBUSY;
 
-               if ((int)req->tp_block_size <= 0)
+               if (unlikely((int)req->tp_block_size <= 0))
                        return -EINVAL;
-               if (req->tp_block_size&(PAGE_SIZE-1))
+               if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
                        return -EINVAL;
-               if (req->tp_frame_size < TPACKET_HDRLEN)
+               if (unlikely(req->tp_frame_size < TPACKET_HDRLEN))
                        return -EINVAL;
-               if (req->tp_frame_size&(TPACKET_ALIGNMENT-1))
+               if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
                        return -EINVAL;
 
                po->frames_per_block = req->tp_block_size/req->tp_frame_size;
-               if (po->frames_per_block <= 0)
+               if (unlikely(po->frames_per_block <= 0))
                        return -EINVAL;
-               if (po->frames_per_block*req->tp_block_nr != req->tp_frame_nr)
+               if (unlikely((po->frames_per_block * req->tp_block_nr) !=
+                            req->tp_frame_nr))
                        return -EINVAL;
-               /* OK! */
-
-               /* Allocate page vector */
-               while ((PAGE_SIZE<<order) < req->tp_block_size)
-                       order++;
 
                err = -ENOMEM;
-
-               pg_vec = kmalloc(req->tp_block_nr*sizeof(char *), GFP_KERNEL);
-               if (pg_vec == NULL)
+               order = get_order(req->tp_block_size);
+               pg_vec = alloc_pg_vec(req, order);
+               if (unlikely(!pg_vec))
                        goto out;
-               memset(pg_vec, 0, req->tp_block_nr*sizeof(char **));
-
-               for (i=0; i<req->tp_block_nr; i++) {
-                       struct page *page, *pend;
-                       pg_vec[i] = (char *)__get_free_pages(GFP_KERNEL, order);
-                       if (!pg_vec[i])
-                               goto out_free_pgvec;
-
-                       pend = pg_vec_endpage(pg_vec[i], order);
-                       for (page = virt_to_page(pg_vec[i]); page <= pend; page++)
-                               SetPageReserved(page);
-               }
-               /* Page vector is allocated */
 
                l = 0;
-               for (i=0; i<req->tp_block_nr; i++) {
+               for (i = 0; i < req->tp_block_nr; i++) {
                        char *ptr = pg_vec[i];
                        struct tpacket_hdr *header;
                        int k;
 
-                       for (k=0; k<po->frames_per_block; k++) {
-                               
-                               header = (struct tpacket_hdr*)ptr;
+                       for (k = 0; k < po->frames_per_block; k++) {
+                               header = (struct tpacket_hdr *) ptr;
                                header->tp_status = TP_STATUS_KERNEL;
                                ptr += req->tp_frame_size;
                        }
                }
                /* Done */
        } else {
-               if (req->tp_frame_nr)
+               if (unlikely(req->tp_frame_nr))
                        return -EINVAL;
        }
 
@@ -1701,7 +1707,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
 
                spin_lock_bh(&sk->sk_receive_queue.lock);
                pg_vec = XC(po->pg_vec, pg_vec);
-               po->frame_max = req->tp_frame_nr-1;
+               po->frame_max = (req->tp_frame_nr - 1);
                po->head = 0;
                po->frame_size = req->tp_frame_size;
                spin_unlock_bh(&sk->sk_receive_queue.lock);
@@ -1728,7 +1734,6 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
 
        release_sock(sk);
 
-out_free_pgvec:
        if (pg_vec)
                free_pg_vec(pg_vec, order, req->tp_block_nr);
 out:
@@ -1755,17 +1760,19 @@ static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_st
        if (size != po->pg_vec_len*po->pg_vec_pages*PAGE_SIZE)
                goto out;
 
-       atomic_inc(&po->mapped);
        start = vma->vm_start;
-       err = -EAGAIN;
-       for (i=0; i<po->pg_vec_len; i++) {
-               if (remap_pfn_range(vma, start,
-                                    __pa(po->pg_vec[i]) >> PAGE_SHIFT,
-                                    po->pg_vec_pages*PAGE_SIZE,
-                                    vma->vm_page_prot))
-                       goto out;
-               start += po->pg_vec_pages*PAGE_SIZE;
+       for (i = 0; i < po->pg_vec_len; i++) {
+               struct page *page = virt_to_page(po->pg_vec[i]);
+               int pg_num;
+
+               for (pg_num = 0; pg_num < po->pg_vec_pages; pg_num++, page++) {
+                       err = vm_insert_page(vma, start, page);
+                       if (unlikely(err))
+                               goto out;
+                       start += PAGE_SIZE;
+               }
        }
+       atomic_inc(&po->mapped);
        vma->vm_ops = &packet_mmap_ops;
        err = 0;