mm: add support for direct_IO to highmem pages
author Mel Gorman <mgorman@suse.de>
Tue, 31 Jul 2012 23:45:02 +0000 (16:45 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 1 Aug 2012 01:42:47 +0000 (18:42 -0700)
The patch "mm: add support for a filesystem to activate swap files and use
direct_IO for writing swap pages" added support for using direct_IO to
write swap pages but it is insufficient for highmem pages.

To support highmem pages, this patch kmap()s the page before calling the
direct_IO() handler.  As direct_IO deals with virtual addresses, an
additional helper is necessary for get_kernel_pages() to look up the
struct page for a kmap virtual address.
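
For illustration only (not part of the patch): virt_to_page() assumes a
lowmem linear address, but kmap() of a highmem page returns an address in
the pkmap window, so the struct page must be recovered differently.  A
minimal sketch of the flow this patch enables:

	/* Hypothetical sketch; error handling omitted. */
	void *vaddr = kmap(page);		/* pkmap address for highmem */
	struct iovec iov = {
		.iov_base = vaddr,
		.iov_len  = PAGE_SIZE,
	};

	/* ->direct_IO(KERNEL_WRITE, ...) eventually reaches
	 * get_kernel_pages(), which maps vaddr back to the page: */
	struct page *p = kmap_to_page(vaddr);	/* p == page */

	kunmap(page);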

Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Eric B Munson <emunson@mgebm.net>
Cc: Eric Paris <eparis@redhat.com>
Cc: James Morris <jmorris@namei.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Mike Christie <michaelc@cs.wisc.edu>
Cc: Neil Brown <neilb@suse.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Cc: Xiaotian Feng <dfeng@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/highmem.h
mm/highmem.c
mm/page_io.c
mm/swap.c

index 774fa47b3b5b2a19f810a166682e1a0912827f3c..ef788b5b4a3504b069818e33145b2b3e578235fb 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -39,10 +39,17 @@ extern unsigned long totalhigh_pages;
 
 void kmap_flush_unused(void);
 
+struct page *kmap_to_page(void *addr);
+
 #else /* CONFIG_HIGHMEM */
 
 static inline unsigned int nr_free_highpages(void) { return 0; }
 
+static inline struct page *kmap_to_page(void *addr)
+{
+       return virt_to_page(addr);
+}
+
 #define totalhigh_pages 0UL
 
 #ifndef ARCH_HAS_KMAP
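
On !CONFIG_HIGHMEM configurations kmap() degenerates to page_address(),
so every mapped address is a lowmem linear address and the inline stub
above can resolve it with virt_to_page() alone.  A hedged illustration:

	/* Sketch only: with !CONFIG_HIGHMEM, kmap() returns page_address(). */
	void *vaddr = kmap(page);
	struct page *p = kmap_to_page(vaddr);	/* virt_to_page(vaddr) */
	kunmap(page);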
index 57d82c6250c308e36089f81a8e902b729391335e..d517cd16a6eb91e8df18f3cbd79cd67621b27d3f 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -94,6 +94,18 @@ static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);
                do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
 #endif
 
+struct page *kmap_to_page(void *vaddr)
+{
+       unsigned long addr = (unsigned long)vaddr;
+
+       if (addr >= PKMAP_ADDR(0) && addr <= PKMAP_ADDR(LAST_PKMAP)) {
+               int i = (addr - PKMAP_ADDR(0)) >> PAGE_SHIFT;
+               return pte_page(pkmap_page_table[i]);
+       }
+
+       return virt_to_page(addr);
+}
+
 static void flush_all_zero_pkmaps(void)
 {
        int i;
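
The pkmap window is a contiguous run of LAST_PKMAP page-sized slots
starting at PKMAP_ADDR(0), so the slot index in kmap_to_page() falls out
of simple pointer arithmetic.  A worked sketch with illustrative values
(assuming 4 KiB pages, i.e. PAGE_SHIFT == 12):

	/* Illustrative values only; the real base comes from PKMAP_ADDR(0). */
	unsigned long base = 0xffc00000;	/* hypothetical PKMAP_ADDR(0) */
	unsigned long addr = 0xffc03000;	/* an address kmap() handed out */
	int i = (addr - base) >> 12;		/* i == 3: fourth pkmap slot */
	/* pte_page(pkmap_page_table[i]) is the page mapped at addr. */

Addresses handed out by kmap() always lie below PKMAP_ADDR(LAST_PKMAP),
so the inclusive upper-bound check never indexes past the table in
practice.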
index 4a379629e31f17f1bc3b93c02b12fb489653cbce..78eee32ee4860b17155413bb37c211bbd8b9b60a 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -205,7 +205,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
                struct file *swap_file = sis->swap_file;
                struct address_space *mapping = swap_file->f_mapping;
                struct iovec iov = {
-                       .iov_base = page_address(page),
+                       .iov_base = kmap(page),
                        .iov_len  = PAGE_SIZE,
                };
 
@@ -218,6 +218,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
                ret = mapping->a_ops->direct_IO(KERNEL_WRITE,
                                                &kiocb, &iov,
                                                kiocb.ki_pos, 1);
+               kunmap(page);
                if (ret == PAGE_SIZE) {
                        count_vm_event(PSWPOUT);
                        ret = 0;
index 7d7f80c8044aa80f3bd3f5dc3b6ed9e4401b6c8b..77825883298f1f1843e068c2e5ad0d55706cf873 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -258,8 +258,7 @@ int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
                if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
                        return seg;
 
-               /* virt_to_page sanity checks the PFN */
-               pages[seg] = virt_to_page(kiov[seg].iov_base);
+               pages[seg] = kmap_to_page(kiov[seg].iov_base);
                page_cache_get(pages[seg]);
        }