From: Nick Piggin
Date: Wed, 22 Mar 2006 08:08:30 +0000 (-0800)
Subject: [PATCH] sg: use compound pages
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=f9aed0e2537174b95908f48b6052ae37196c9390;p=GitHub%2FLineageOS%2Fandroid_kernel_motorola_exynos9610.git

[PATCH] sg: use compound pages

sg increments the refcount of constituent pages in its higher order memory
allocations when they are about to be mapped by userspace.  This is done so
the subsequent get_page/put_page when doing the mapping and unmapping does
not free the page.

Move over to the preferred way, that is, using compound pages instead.

This fixes a whole class of possible obscure bugs where a get_user_pages on
a constituent page may outlast the user mappings or even the driver.

Signed-off-by: Nick Piggin
Cc: Hugh Dickins
Cc: Douglas Gilbert
Cc: James Bottomley
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---

diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 5a0a19322d01..0e0ca8fc7318 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1140,32 +1140,6 @@ sg_fasync(int fd, struct file *filp, int mode)
 	return (retval < 0) ? retval : 0;
 }
 
-/* When startFinish==1 increments page counts for pages other than the
-   first of scatter gather elements obtained from alloc_pages().
-   When startFinish==0 decrements ... */
-static void
-sg_rb_correct4mmap(Sg_scatter_hold * rsv_schp, int startFinish)
-{
-	struct scatterlist *sg = rsv_schp->buffer;
-	struct page *page;
-	int k, m;
-
-	SCSI_LOG_TIMEOUT(3, printk("sg_rb_correct4mmap: startFinish=%d, scatg=%d\n",
-				   startFinish, rsv_schp->k_use_sg));
-	/* N.B. correction _not_ applied to base page of each allocation */
-	for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sg) {
-		for (m = PAGE_SIZE; m < sg->length; m += PAGE_SIZE) {
-			page = sg->page;
-			if (startFinish)
-				get_page(page);
-			else {
-				if (page_count(page) > 0)
-					__put_page(page);
-			}
-		}
-	}
-}
-
 static struct page *
 sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type)
 {
@@ -1237,10 +1211,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
 		sa += len;
 	}
 
-	if (0 == sfp->mmap_called) {
-		sg_rb_correct4mmap(rsv_schp, 1);	/* do only once per fd lifetime */
-		sfp->mmap_called = 1;
-	}
+	sfp->mmap_called = 1;
 	vma->vm_flags |= VM_RESERVED;
 	vma->vm_private_data = sfp;
 	vma->vm_ops = &sg_mmap_vm_ops;
@@ -2395,8 +2366,6 @@ __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
 		SCSI_LOG_TIMEOUT(6,
 			printk("__sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
 			(int) sfp->reserve.bufflen, (int) sfp->reserve.k_use_sg));
-		if (sfp->mmap_called)
-			sg_rb_correct4mmap(&sfp->reserve, 0);	/* undo correction */
 		sg_remove_scat(&sfp->reserve);
 	}
 	sfp->parentdp = NULL;
@@ -2478,9 +2447,9 @@ sg_page_malloc(int rqSz, int lowDma, int *retSzp)
 		return resp;
 
 	if (lowDma)
-		page_mask = GFP_ATOMIC | GFP_DMA | __GFP_NOWARN;
+		page_mask = GFP_ATOMIC | GFP_DMA | __GFP_COMP | __GFP_NOWARN;
 	else
-		page_mask = GFP_ATOMIC | __GFP_NOWARN;
+		page_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
 
 	for (order = 0, a_size = PAGE_SIZE; a_size < rqSz; order++, a_size <<= 1)
 		;
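
Illustrative note (not part of the patch): a minimal sketch of the before/after
allocation strategy the commit message describes. The helper names
demo_alloc_old() and demo_alloc_new() are invented for this example; only
alloc_pages(), get_page() and the GFP flags come from the kernel API, and the
sketch assumes the 2.6-era semantics the patch relies on.

/*
 * Sketch only; demo_alloc_old()/demo_alloc_new() are hypothetical helpers
 * written to illustrate the change, not code from sg.c.
 */
#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Old scheme: a plain high-order allocation yields 2^order independent
 * pages, so every constituent page after the first must have its refcount
 * raised by hand before userspace maps it (and dropped again on teardown),
 * which is what sg_rb_correct4mmap() did.
 */
static struct page *demo_alloc_old(unsigned int order)
{
	struct page *page = alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
	unsigned int i;

	if (page)
		for (i = 1; i < (1U << order); i++)
			get_page(page + i);	/* manual per-page pinning */
	return page;
}

/*
 * New scheme: __GFP_COMP returns a compound page, so get_page()/put_page()
 * on any constituent page pins or releases the allocation as a single unit
 * and no per-page correction is needed.
 */
static struct page *demo_alloc_new(unsigned int order)
{
	return alloc_pages(GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN, order);
}

With __GFP_COMP the lifetime of the whole high-order block is tied to the
compound page's refcount, which is why the patch can delete
sg_rb_correct4mmap() and the mmap_called bookkeeping around it.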