From: Nick Piggin
Date: Sun, 19 Oct 2008 03:26:57 +0000 (-0700)
Subject: mm: pagecache insertion fewer atomics
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=f45840b5c128445da70e7ec33adc47b4a12bdaf4;p=GitHub%2FLineageOS%2FG12%2Fandroid_kernel_amlogic_linux-4.9.git

mm: pagecache insertion fewer atomics

Setting and clearing the page locked bit when inserting a page into the
swapcache / pagecache, while the page still has no other references, can
use non-atomic page flags operations, because no other CPU can be
operating on the page at that time.

This saves one atomic operation per page inserted into the pagecache.

Signed-off-by: Nick Piggin
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 4b6c4d8d26b8..7334b2b6c4c6 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -299,14 +299,14 @@ extern int __lock_page_killable(struct page *page);
 extern void __lock_page_nosync(struct page *page);
 extern void unlock_page(struct page *page);
 
-static inline void set_page_locked(struct page *page)
+static inline void __set_page_locked(struct page *page)
 {
-	set_bit(PG_locked, &page->flags);
+	__set_bit(PG_locked, &page->flags);
 }
 
-static inline void clear_page_locked(struct page *page)
+static inline void __clear_page_locked(struct page *page)
 {
-	clear_bit(PG_locked, &page->flags);
+	__clear_bit(PG_locked, &page->flags);
 }
 
 static inline int trylock_page(struct page *page)
@@ -438,17 +438,17 @@ extern void __remove_from_page_cache(struct page *page);
 
 /*
  * Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run set_page_locked() against it.
+ * the page is new, so we can just run __set_page_locked() against it.
  */
 static inline int add_to_page_cache(struct page *page,
 		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
 {
 	int error;
 
-	set_page_locked(page);
+	__set_page_locked(page);
 	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
 	if (unlikely(error))
-		clear_page_locked(page);
+		__clear_page_locked(page);
 	return error;
 }
 
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 43cda7b4b808..3353c9029cef 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -303,7 +303,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		 * re-using the just freed swap entry for an existing page.
 		 * May fail (-ENOMEM) if radix-tree node allocation failed.
 		 */
-		set_page_locked(new_page);
+		__set_page_locked(new_page);
 		SetPageSwapBacked(new_page);
 		err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
 		if (likely(!err)) {
@@ -315,7 +315,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 			return new_page;
 		}
 		ClearPageSwapBacked(new_page);
-		clear_page_locked(new_page);
+		__clear_page_locked(new_page);
 		swap_free(entry);
 	} while (err != -ENOMEM);
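
For readers outside the kernel tree, the optimization the patch exploits can be
illustrated with a small userspace C sketch: a flag on an object that no other
CPU can reach yet may be set and cleared with plain read-modify-write stores,
and the atomic variants are only required once the object has been published
into a shared structure. Everything below (struct obj, OBJ_LOCKED, the helper
names) is a hypothetical stand-in, not kernel API; in the kernel the same
distinction is drawn by __set_bit()/__clear_bit() versus set_bit()/clear_bit()
on page->flags.

/*
 * Standalone userspace sketch (not kernel code).  All names here are
 * hypothetical; the point is only the atomic vs. non-atomic distinction.
 * Build with: gcc -std=gnu11 -O2 sketch.c
 */
#include <stdio.h>
#include <stdlib.h>

#define OBJ_LOCKED	(1UL << 0)

struct obj {
	unsigned long flags;			/* stand-in for page->flags */
};

/* Non-atomic: plain read-modify-write, like __set_bit()/__clear_bit(). */
static inline void __obj_set_locked(struct obj *o)
{
	o->flags |= OBJ_LOCKED;
}

static inline void __obj_clear_locked(struct obj *o)
{
	o->flags &= ~OBJ_LOCKED;
}

/* Atomic: needed once other CPUs/threads can reach the object, like set_bit(). */
static inline void obj_set_locked(struct obj *o)
{
	__atomic_fetch_or(&o->flags, OBJ_LOCKED, __ATOMIC_SEQ_CST);
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return 1;

	/*
	 * The object was just allocated and is visible only to this thread,
	 * so the cheap non-atomic variant is enough -- the same reasoning
	 * that lets add_to_page_cache() call __set_page_locked() before the
	 * page is inserted into the shared radix tree.
	 */
	__obj_set_locked(o);

	/*
	 * Once the object has been published into a shared structure,
	 * concurrent flag updates are possible and the atomic variant
	 * must be used instead.
	 */
	obj_set_locked(o);

	printf("flags = %#lx\n", o->flags);
	free(o);
	return 0;
}

The same reasoning covers the error path in add_to_page_cache(): if the
insertion into the pagecache fails, the page was never made visible to other
CPUs, so the locked bit can be cleared with the non-atomic __clear_page_locked()
as well.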