#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
unsigned char);
static void free_swap_count_continuations(struct swap_info_struct *);
-static sector_t map_swap_entry(swp_entry_t, struct block_device**);
DEFINE_SPINLOCK(swap_lock);
static unsigned int nr_swapfiles;
spin_unlock(&si->lock);
return (swp_entry_t) {0};
}
+EXPORT_SYMBOL_GPL(get_swap_page_of_type);
-static struct swap_info_struct *swap_info_get(swp_entry_t entry)
+#ifdef CONFIG_MEMCG
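+/*
+ * get_swap_page_by_memcg - allocate a swap entry, steered by the page's
+ * memcg. Pages charged to the root memcg scan the swap devices from
+ * highest to lowest priority, as get_swap_page() does; all other pages
+ * start below the head device, so cgroup-owned pages are pushed towards
+ * lower-priority devices. Falls back to get_swap_page() when at most one
+ * swap area is registered.
+ */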
+swp_entry_t get_swap_page_by_memcg(struct page *page)
+{
+ struct swap_info_struct *si;
+ int type, next, wrapped;
+ pgoff_t offset;
+
+ /* Fall back to the original get_swap_page() if we have at most one swap area. */
+ if (nr_swapfiles <= 1)
+ return get_swap_page();
+
+ spin_lock(&swap_lock);
+ if (atomic_long_read(&nr_swap_pages) <= 0)
+ goto noswap;
+ atomic_long_dec(&nr_swap_pages);
+
+ /* Choose the scan order based on the page's memcg */
+ if (memcg_is_root(page)) {
+ /* High to low priority */
+ wrapped = 1;
+ type = swap_list.head;
+ } else {
+ /* Low to high priority */
+ wrapped = 0;
+ si = swap_info[swap_list.head];
+ type = si->next;
+ }
+
+ /* Scan for an empty swap entry */
+ for (; (type >= 0) || (wrapped == 0); type = next) {
+ if (type < 0) {
+ wrapped++;
+ next = swap_list.head;
+ continue;
+ }
+
+ si = swap_info[type];
+ next = si->next;
+ spin_lock(&si->lock);
+ if (!si->highest_bit) {
+ spin_unlock(&si->lock);
+ continue;
+ }
+ if (!(si->flags & SWP_WRITEOK)) {
+ spin_unlock(&si->lock);
+ continue;
+ }
+ spin_unlock(&swap_lock);
+ /* Allocate the entry as swap cache (SWAP_HAS_CACHE), as get_swap_page() does */
+ offset = scan_swap_map(si, SWAP_HAS_CACHE);
+ spin_unlock(&si->lock);
+ if (offset)
+ return swp_entry(type, offset);
+ spin_lock(&swap_lock);
+ }
+
+ atomic_long_inc(&nr_swap_pages);
+noswap:
+ spin_unlock(&swap_lock);
+ return (swp_entry_t) {0};
+}
+#endif
+
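+/*
+ * Illustrative use (hypothetical caller, mirroring how get_swap_page()
+ * is invoked from the swap-out path; not taken from this patch):
+ *
+ * swp_entry_t entry = get_swap_page_by_memcg(page);
+ * if (!entry.val)
+ * goto no_swap_space;
+ */
+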
+static unsigned int find_next_to_unuse(struct swap_info_struct *si,
+ unsigned int prev, bool frontswap);
+
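+/*
+ * get_swap_range_of_type - allocate a contiguous run of swap entries on
+ * swap device @type. On success *start and *end delimit the inclusive
+ * range of allocated entries, clamped to at most @limit pages; on failure
+ * both are set to swp_entry(0, 0). Intended for callers, such as a
+ * hibernation writer, that want large sequential extents on disk.
+ */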
+void get_swap_range_of_type(int type, swp_entry_t *start, swp_entry_t *end,
+ unsigned int limit)
+{
+ struct swap_info_struct *si;
+ pgoff_t start_at;
+ unsigned int i;
+
+ *start = swp_entry(0, 0);
+ *end = swp_entry(0, 0);
+ si = swap_info[type];
+ /* Check the device exists before taking its lock */
+ if (!si)
+ return;
+ spin_lock(&si->lock);
+ if (si->flags & SWP_WRITEOK) {
+ /* Allocate a real swap entry (usage count 1), not swap cache */
+ start_at = scan_swap_map(si, 1);
+ if (start_at) {
+ unsigned int stop_at = find_next_to_unuse(si, start_at, false);
+ if (stop_at > start_at)
+ stop_at--;
+ else
+ stop_at = si->max - 1;
+ if (stop_at - start_at + 1 > limit)
+ stop_at = min_t(unsigned int,
+ start_at + limit - 1,
+ si->max - 1);
+ /* Mark them used */
+ for (i = start_at; i <= stop_at; i++)
+ si->swap_map[i] = 1;
+ /* scan_swap_map() already accounted for the first page */
+ si->inuse_pages += stop_at - start_at;
+
+ atomic_long_sub(stop_at - start_at + 1, &nr_swap_pages);
+ if (start_at + 1 == si->lowest_bit)
+ si->lowest_bit = stop_at + 1;
+ if (si->inuse_pages == si->pages) {
+ si->lowest_bit = si->max;
+ si->highest_bit = 0;
+ }
+ si->cluster_next = stop_at + 1;
+ *start = swp_entry(type, start_at);
+ *end = swp_entry(type, stop_at);
+ }
+ }
+ spin_unlock(&si->lock);
+}
+EXPORT_SYMBOL_GPL(get_swap_range_of_type);
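+/*
+ * Illustrative use (hypothetical hibernation caller; names other than the
+ * exported functions are assumptions):
+ *
+ * swp_entry_t start, end;
+ * unsigned long nr;
+ *
+ * get_swap_range_of_type(swap_type, &start, &end, pages_wanted);
+ * if (!start.val)
+ * return -ENOSPC;
+ * nr = swp_offset(end) - swp_offset(start) + 1;
+ */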
+
+struct swap_info_struct *swap_info_get(swp_entry_t entry)
{
struct swap_info_struct *p;
unsigned long offset, type;
return NULL;
}
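+/*
+ * swap_info_unlock - drop the per-device lock that swap_info_get()
+ * returns holding, so callers outside swapfile.c can pair the two
+ * without touching swap_info_struct internals.
+ */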
+void swap_info_unlock(struct swap_info_struct *si)
+{
+ spin_unlock(&si->lock);
+}
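+
+/*
+ * Typical pairing (illustrative):
+ *
+ * si = swap_info_get(entry);
+ * if (si) {
+ * count = si->swap_map[swp_offset(entry)];
+ * swap_info_unlock(si);
+ * }
+ */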
+
/*
* A swap entry of this type has just been freed; check whether it is the
* highest-priority swap type to have freed an entry. get_swap_page() uses
spin_unlock(&p->lock);
}
}
+EXPORT_SYMBOL_GPL(swap_free);
/*
* Map swap page at `entry' into its corresponding sector_t, via `bdev'.
* Note that the return type of this function is sector_t, but it returns
* the page offset into the bdev, not the sector offset.
*/
-static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
+sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
{
struct swap_info_struct *sis;
struct swap_extent *start_se;
BUG_ON(se == start_se); /* It *must* be present */
}
}
+EXPORT_SYMBOL_GPL(map_swap_entry);
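+/*
+ * Note for callers: the returned value is a page offset, so a block
+ * device sector must be derived from it, e.g. (illustrative):
+ *
+ * offset = map_swap_entry(entry, &bdev);
+ * sector = offset << (PAGE_SHIFT - 9);
+ */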
/*
* Returns the page offset into bdev for the specified page's swap entry.
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+#ifdef CONFIG_TOI
+ /*
+ * FIXME: current->mm may be NULL when sys_swapoff() is called from
+ * kernel space, e.g. via disable_swapfile() in tuxonice_swap.c, so
+ * warn instead of BUG()ing here.
+ */
+ pr_warn("[HIB/SWAP] [%s] file(%s) current(%p/%d/%s) current->mm(%p)\n",
+ __func__, specialfile, current, current->pid, current->comm,
+ current->mm);
+ WARN_ON(!current->mm);
+#else
BUG_ON(!current->mm);
+#endif
pathname = getname(specialfile);
if (IS_ERR(pathname))
putname(pathname);
return err;
}
+EXPORT_SYMBOL_GPL(sys_swapoff);
#ifdef CONFIG_PROC_FS
static unsigned swaps_poll(struct file *file, poll_table *wait)
mutex_unlock(&inode->i_mutex);
return error;
}
+EXPORT_SYMBOL_GPL(sys_swapon);
void si_swapinfo(struct sysinfo *val)
{
val->totalswap = total_swap_pages + nr_to_be_unused;
spin_unlock(&swap_lock);
}
+EXPORT_SYMBOL_GPL(si_swapinfo);
/*
* Verify that a swap entry is valid and increment its swap map count.
VM_BUG_ON(!PageSwapCache(page));
return swp_offset(swap);
}
+
EXPORT_SYMBOL_GPL(__page_file_index);
+struct swap_info_struct *get_swap_info_struct(unsigned type)
+{
+ return swap_info[type];
+}
+EXPORT_SYMBOL_GPL(get_swap_info_struct);
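+/*
+ * get_swap_info_struct() performs no bounds checking; callers must pass
+ * a type taken from a valid entry, e.g. (illustrative):
+ *
+ * si = get_swap_info_struct(swp_type(entry));
+ */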
+
/*
* add_swap_count_continuation - called when a swap count is duplicated
* beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's