include/linux/hugetlb.h
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _LINUX_HUGETLB_H
3#define _LINUX_HUGETLB_H
4
5#include <linux/mm_types.h>
6#include <linux/mmdebug.h>
7#include <linux/fs.h>
8#include <linux/hugetlb_inline.h>
9#include <linux/cgroup.h>
10#include <linux/list.h>
11#include <linux/kref.h>
12#include <asm/pgtable.h>
13
14struct ctl_table;
15struct user_struct;
16struct mmu_gather;
17
18#ifndef is_hugepd
19/*
20 * Some architectures require a hugepage directory format that is needed
21 * to support multiple hugepage sizes. For example, commit
22 * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
23 * introduced this on powerpc, allowing for a more flexible hugepage
24 * pagetable layout.
25 */
26typedef struct { unsigned long pd; } hugepd_t;
27#define is_hugepd(hugepd) (0)
28#define __hugepd(x) ((hugepd_t) { (x) })
29static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
30 unsigned pdshift, unsigned long end,
31 int write, struct page **pages, int *nr)
32{
33 return 0;
34}
35#else
36extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
37 unsigned pdshift, unsigned long end,
38 int write, struct page **pages, int *nr);
39#endif
40
41
42#ifdef CONFIG_HUGETLB_PAGE
43
44#include <linux/mempolicy.h>
45#include <linux/shm.h>
46#include <asm/tlbflush.h>
47
48struct hugepage_subpool {
49 spinlock_t lock;
50 long count;
51 long max_hpages; /* Maximum huge pages or -1 if no maximum. */
52 long used_hpages; /* Used count against maximum, includes */
53 /* both alloced and reserved pages. */
54 struct hstate *hstate;
55 long min_hpages; /* Minimum huge pages or -1 if no minimum. */
56 long rsv_hpages; /* Pages reserved against global pool to */
57 /* satisfy minimum size. */
58};
59
60struct resv_map {
61 struct kref refs;
62 spinlock_t lock;
63 struct list_head regions;
64 long adds_in_progress;
65 struct list_head region_cache;
66 long region_cache_count;
67};
68extern struct resv_map *resv_map_alloc(void);
69void resv_map_release(struct kref *ref);
70
71extern spinlock_t hugetlb_lock;
72extern int hugetlb_max_hstate __read_mostly;
73#define for_each_hstate(h) \
74 for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
75
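/*
 * Example (illustrative sketch, not part of the original header): walking
 * every registered hugepage size with for_each_hstate().  A hypothetical
 * debug helper could print each hstate's name and order like this:
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("hstate %s: order %u\n", h->name, huge_page_order(h));
 */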
76struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
77 long min_hpages);
78void hugepage_put_subpool(struct hugepage_subpool *spool);
79
80void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
81int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
82int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
83int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
84
85#ifdef CONFIG_NUMA
86int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
87 void __user *, size_t *, loff_t *);
88#endif
89
90int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
91long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
92 struct page **, struct vm_area_struct **,
93 unsigned long *, unsigned long *, long, unsigned int,
94 int *);
95void unmap_hugepage_range(struct vm_area_struct *,
96 unsigned long, unsigned long, struct page *);
97void __unmap_hugepage_range_final(struct mmu_gather *tlb,
98 struct vm_area_struct *vma,
99 unsigned long start, unsigned long end,
100 struct page *ref_page);
101void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
102 unsigned long start, unsigned long end,
103 struct page *ref_page);
104void hugetlb_report_meminfo(struct seq_file *);
105int hugetlb_report_node_meminfo(int, char *);
106void hugetlb_show_meminfo(void);
107unsigned long hugetlb_total_pages(void);
108int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
109 unsigned long address, unsigned int flags);
110int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
111 struct vm_area_struct *dst_vma,
112 unsigned long dst_addr,
113 unsigned long src_addr,
114 struct page **pagep);
115int hugetlb_reserve_pages(struct inode *inode, long from, long to,
116 struct vm_area_struct *vma,
117 vm_flags_t vm_flags);
118long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
119 long freed);
120bool isolate_huge_page(struct page *page, struct list_head *list);
121void putback_active_hugepage(struct page *page);
122void free_huge_page(struct page *page);
123void hugetlb_fix_reserve_counts(struct inode *inode);
124extern struct mutex *hugetlb_fault_mutex_table;
125u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
126 struct vm_area_struct *vma,
127 struct address_space *mapping,
128 pgoff_t idx, unsigned long address);
129
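/*
 * Example (sketch of the usual fault-serialization pattern): callers such
 * as hugetlb_fault() hash the faulting mapping/index to one slot of
 * hugetlb_fault_mutex_table and hold that mutex while the fault is handled:
 *
 *	u32 hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	... handle the fault ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */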
130pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
131
132extern int hugepages_treat_as_movable;
133extern int sysctl_hugetlb_shm_group;
134extern struct list_head huge_boot_pages;
135
136/* arch callbacks */
137
138pte_t *huge_pte_alloc(struct mm_struct *mm,
139 unsigned long addr, unsigned long sz);
140pte_t *huge_pte_offset(struct mm_struct *mm,
141 unsigned long addr, unsigned long sz);
142int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
143void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
144 unsigned long *start, unsigned long *end);
145struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
146 int write);
147struct page *follow_huge_pd(struct vm_area_struct *vma,
148 unsigned long address, hugepd_t hpd,
149 int flags, int pdshift);
150struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
151 pmd_t *pmd, int flags);
152struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
153 pud_t *pud, int flags);
154struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
155 pgd_t *pgd, int flags);
156
157int pmd_huge(pmd_t pmd);
158int pud_huge(pud_t pud);
159unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
160 unsigned long address, unsigned long end, pgprot_t newprot);
161
162bool is_hugetlb_entry_migration(pte_t pte);
163#else /* !CONFIG_HUGETLB_PAGE */
164
165static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
166{
167}
168
169static inline unsigned long hugetlb_total_pages(void)
170{
171 return 0;
172}
173
174static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
175 pte_t *ptep)
176{
177 return 0;
178}
179
180static inline void adjust_range_if_pmd_sharing_possible(
181 struct vm_area_struct *vma,
182 unsigned long *start, unsigned long *end)
183{
184}
185
186#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n) ({ BUG(); 0; })
187#define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL)
188#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
189static inline void hugetlb_report_meminfo(struct seq_file *m)
190{
191}
192#define hugetlb_report_node_meminfo(n, buf) 0
193static inline void hugetlb_show_meminfo(void)
194{
195}
196#define follow_huge_pd(vma, addr, hpd, flags, pdshift) NULL
197#define follow_huge_pmd(mm, addr, pmd, flags) NULL
198#define follow_huge_pud(mm, addr, pud, flags) NULL
199#define follow_huge_pgd(mm, addr, pgd, flags) NULL
200#define prepare_hugepage_range(file, addr, len) (-EINVAL)
201#define pmd_huge(x) 0
202#define pud_huge(x) 0
203#define is_hugepage_only_range(mm, addr, len) 0
204#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
205#define hugetlb_fault(mm, vma, addr, flags) ({ BUG(); 0; })
206#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
207 src_addr, pagep) ({ BUG(); 0; })
208#define huge_pte_offset(mm, address, sz) 0
209
210static inline bool isolate_huge_page(struct page *page, struct list_head *list)
211{
212 return false;
213}
214#define putback_active_hugepage(p) do {} while (0)
215
216static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
217 unsigned long address, unsigned long end, pgprot_t newprot)
218{
219 return 0;
220}
221
222static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
223 struct vm_area_struct *vma, unsigned long start,
224 unsigned long end, struct page *ref_page)
225{
226 BUG();
227}
228
229static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
230 struct vm_area_struct *vma, unsigned long start,
231 unsigned long end, struct page *ref_page)
232{
233 BUG();
234}
235
236#endif /* !CONFIG_HUGETLB_PAGE */
237/*
238 * hugepages at the page global directory. If an arch supports
239 * hugepages at the pgd level, it needs to define this.
240 */
241#ifndef pgd_huge
242#define pgd_huge(x) 0
243#endif
244#ifndef p4d_huge
245#define p4d_huge(x) 0
246#endif
247
248#ifndef pgd_write
249static inline int pgd_write(pgd_t pgd)
250{
251 BUG();
252 return 0;
253}
254#endif
255
256#define HUGETLB_ANON_FILE "anon_hugepage"
257
258enum {
259 /*
260 * The file will be used as a shm file so shmfs accounting rules
261 * apply
262 */
263 HUGETLB_SHMFS_INODE = 1,
264 /*
265 * The file is being created on the internal vfs mount and shmfs
266 * accounting rules do not apply
267 */
268 HUGETLB_ANONHUGE_INODE = 2,
269};
270
271#ifdef CONFIG_HUGETLBFS
272struct hugetlbfs_sb_info {
273 long max_inodes; /* inodes allowed */
274 long free_inodes; /* inodes free */
275 spinlock_t stat_lock;
276 struct hstate *hstate;
277 struct hugepage_subpool *spool;
278 kuid_t uid;
279 kgid_t gid;
280 umode_t mode;
281};
282
283static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
284{
285 return sb->s_fs_info;
286}
287
288extern const struct file_operations hugetlbfs_file_operations;
289extern const struct vm_operations_struct hugetlb_vm_ops;
290struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
291 struct user_struct **user, int creat_flags,
292 int page_size_log);
293
294static inline bool is_file_hugepages(struct file *file)
295{
296 if (file->f_op == &hugetlbfs_file_operations)
297 return true;
298
299 return is_file_shm_hugepages(file);
300}
301
302
303#else /* !CONFIG_HUGETLBFS */
304
305#define is_file_hugepages(file) false
306static inline struct file *
307hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
308 struct user_struct **user, int creat_flags,
309 int page_size_log)
310{
311 return ERR_PTR(-ENOSYS);
312}
313
314#endif /* !CONFIG_HUGETLBFS */
315
316#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
317unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
318 unsigned long len, unsigned long pgoff,
319 unsigned long flags);
320#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
321
322#ifdef CONFIG_HUGETLB_PAGE
323
324#define HSTATE_NAME_LEN 32
325/* Defines one hugetlb page size */
326struct hstate {
327 int next_nid_to_alloc;
328 int next_nid_to_free;
329 unsigned int order;
330 unsigned long mask;
331 unsigned long max_huge_pages;
332 unsigned long nr_huge_pages;
333 unsigned long free_huge_pages;
334 unsigned long resv_huge_pages;
335 unsigned long surplus_huge_pages;
336 unsigned long nr_overcommit_huge_pages;
337 struct list_head hugepage_activelist;
338 struct list_head hugepage_freelists[MAX_NUMNODES];
339 unsigned int nr_huge_pages_node[MAX_NUMNODES];
340 unsigned int free_huge_pages_node[MAX_NUMNODES];
341 unsigned int surplus_huge_pages_node[MAX_NUMNODES];
342#ifdef CONFIG_CGROUP_HUGETLB
343 /* cgroup control files */
344 struct cftype cgroup_files[5];
345#endif
346 char name[HSTATE_NAME_LEN];
347};
348
349struct huge_bootmem_page {
350 struct list_head list;
351 struct hstate *hstate;
352#ifdef CONFIG_HIGHMEM
353 phys_addr_t phys;
354#endif
355};
356
357struct page *alloc_huge_page(struct vm_area_struct *vma,
358 unsigned long addr, int avoid_reserve);
359struct page *alloc_huge_page_node(struct hstate *h, int nid);
360struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
361 unsigned long addr, int avoid_reserve);
362struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
363 nodemask_t *nmask);
364int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
365 pgoff_t idx);
366
367/* arch callback */
368int __init __alloc_bootmem_huge_page(struct hstate *h);
369int __init alloc_bootmem_huge_page(struct hstate *h);
370
371void __init hugetlb_bad_size(void);
372void __init hugetlb_add_hstate(unsigned order);
373struct hstate *size_to_hstate(unsigned long size);
374
375#ifndef HUGE_MAX_HSTATE
376#define HUGE_MAX_HSTATE 1
377#endif
378
379extern struct hstate hstates[HUGE_MAX_HSTATE];
380extern unsigned int default_hstate_idx;
381
382#define default_hstate (hstates[default_hstate_idx])
383
384static inline struct hstate *hstate_inode(struct inode *i)
385{
386 return HUGETLBFS_SB(i->i_sb)->hstate;
387}
388
389static inline struct hstate *hstate_file(struct file *f)
390{
391 return hstate_inode(file_inode(f));
392}
393
394static inline struct hstate *hstate_sizelog(int page_size_log)
395{
396 if (!page_size_log)
397 return &default_hstate;
398
399 return size_to_hstate(1UL << page_size_log);
400}
401
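/*
 * Example (illustrative): page_size_log is the huge page shift encoded in
 * mmap()/shmget() flags.  With MAP_HUGE_2MB the log is 21, so
 * hstate_sizelog(21) returns size_to_hstate(1UL << 21), i.e. the 2 MB
 * hstate; a log of 0 simply selects default_hstate.
 */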
402static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
403{
404 return hstate_file(vma->vm_file);
405}
406
407static inline unsigned long huge_page_size(struct hstate *h)
408{
409 return (unsigned long)PAGE_SIZE << h->order;
410}
411
412extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);
413
414extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);
415
416static inline unsigned long huge_page_mask(struct hstate *h)
417{
418 return h->mask;
419}
420
421static inline unsigned int huge_page_order(struct hstate *h)
422{
423 return h->order;
424}
425
426static inline unsigned huge_page_shift(struct hstate *h)
427{
428 return h->order + PAGE_SHIFT;
429}
430
431static inline bool hstate_is_gigantic(struct hstate *h)
432{
433 return huge_page_order(h) >= MAX_ORDER;
434}
435
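/*
 * Example (assuming x86-64 defaults: 4 KiB pages, MAX_ORDER 11): the 1 GiB
 * hstate has order 18, so hstate_is_gigantic() is true and such pages
 * cannot come from the buddy allocator; the 2 MiB hstate (order 9) is not
 * gigantic.
 */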
436static inline unsigned int pages_per_huge_page(struct hstate *h)
437{
438 return 1 << h->order;
439}
440
441static inline unsigned int blocks_per_huge_page(struct hstate *h)
442{
443 return huge_page_size(h) / 512;
444}
445
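/*
 * Worked example (assuming a 4 KiB PAGE_SIZE): for the 2 MiB hstate the
 * order is 9, so huge_page_size() is 4096 << 9 = 2 MiB, huge_page_shift()
 * is 9 + 12 = 21, pages_per_huge_page() is 1 << 9 = 512 base pages, and
 * blocks_per_huge_page() is 2 MiB / 512 = 4096 sectors.
 */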
446#include <asm/hugetlb.h>
447
448#ifndef arch_make_huge_pte
449static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
450 struct page *page, int writable)
451{
452 return entry;
453}
454#endif
455
456static inline struct hstate *page_hstate(struct page *page)
457{
458 VM_BUG_ON_PAGE(!PageHuge(page), page);
459 return size_to_hstate(PAGE_SIZE << compound_order(page));
460}
461
462static inline unsigned hstate_index_to_shift(unsigned index)
463{
464 return hstates[index].order + PAGE_SHIFT;
465}
466
467static inline int hstate_index(struct hstate *h)
468{
469 return h - hstates;
470}
471
472pgoff_t __basepage_index(struct page *page);
473
474/* Return page->index in PAGE_SIZE units */
475static inline pgoff_t basepage_index(struct page *page)
476{
477 if (!PageCompound(page))
478 return page->index;
479
480 return __basepage_index(page);
481}
482
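/*
 * Example (illustrative): in hugetlbfs the index of a huge page is kept in
 * huge-page-size units, so the head page of the third 2 MiB page of a file
 * (index 2) maps to 2 * 512 = 1024 in PAGE_SIZE units, again assuming
 * 4 KiB base pages.
 */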
483extern int dissolve_free_huge_page(struct page *page);
484extern int dissolve_free_huge_pages(unsigned long start_pfn,
485 unsigned long end_pfn);
486static inline bool hugepage_migration_supported(struct hstate *h)
487{
488#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
489 if ((huge_page_shift(h) == PMD_SHIFT) ||
490 (huge_page_shift(h) == PGDIR_SHIFT))
491 return true;
492 else
493 return false;
494#else
495 return false;
496#endif
497}
498
499static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
500 struct mm_struct *mm, pte_t *pte)
501{
502 if (huge_page_size(h) == PMD_SIZE)
503 return pmd_lockptr(mm, (pmd_t *) pte);
504 VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
505 return &mm->page_table_lock;
506}
507
508#ifndef hugepages_supported
509/*
510 * Some platforms decide whether they support huge pages at boot
511 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
512 * when there is no such support.
513 */
514#define hugepages_supported() (HPAGE_SHIFT != 0)
515#endif
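/*
 * Example (sketch): boot-time setup code typically bails out early when
 * the platform reports no huge page support, e.g.
 *
 *	if (!hugepages_supported())
 *		return 0;
 */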
516
517void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);
518
519static inline void hugetlb_count_add(long l, struct mm_struct *mm)
520{
521 atomic_long_add(l, &mm->hugetlb_usage);
522}
523
524static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
525{
526 atomic_long_sub(l, &mm->hugetlb_usage);
527}
528
529#ifndef set_huge_swap_pte_at
530static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
531 pte_t *ptep, pte_t pte, unsigned long sz)
532{
533 set_huge_pte_at(mm, addr, ptep, pte);
534}
535#endif
536#else /* CONFIG_HUGETLB_PAGE */
537struct hstate {};
538#define alloc_huge_page(v, a, r) NULL
539#define alloc_huge_page_node(h, nid) NULL
540#define alloc_huge_page_nodemask(h, preferred_nid, nmask) NULL
541#define alloc_huge_page_noerr(v, a, r) NULL
542#define alloc_bootmem_huge_page(h) NULL
543#define hstate_file(f) NULL
544#define hstate_sizelog(s) NULL
545#define hstate_vma(v) NULL
546#define hstate_inode(i) NULL
547#define page_hstate(page) NULL
548#define huge_page_size(h) PAGE_SIZE
549#define huge_page_mask(h) PAGE_MASK
550#define vma_kernel_pagesize(v) PAGE_SIZE
551#define vma_mmu_pagesize(v) PAGE_SIZE
552#define huge_page_order(h) 0
553#define huge_page_shift(h) PAGE_SHIFT
554static inline bool hstate_is_gigantic(struct hstate *h)
555{
556 return false;
557}
558
559static inline unsigned int pages_per_huge_page(struct hstate *h)
560{
561 return 1;
562}
563
564static inline unsigned hstate_index_to_shift(unsigned index)
565{
566 return 0;
567}
568
569static inline int hstate_index(struct hstate *h)
570{
571 return 0;
572}
573
574static inline pgoff_t basepage_index(struct page *page)
575{
576 return page->index;
577}
578
579static inline int dissolve_free_huge_page(struct page *page)
580{
581 return 0;
582}
583
584static inline int dissolve_free_huge_pages(unsigned long start_pfn,
585 unsigned long end_pfn)
586{
587 return 0;
588}
589
590static inline bool hugepage_migration_supported(struct hstate *h)
591{
592 return false;
593}
594
595static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
596 struct mm_struct *mm, pte_t *pte)
597{
598 return &mm->page_table_lock;
599}
600
601static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
602{
603}
604
605static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
606{
607}
608
609static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
610 pte_t *ptep, pte_t pte, unsigned long sz)
611{
612}
613#endif /* CONFIG_HUGETLB_PAGE */
614
615static inline spinlock_t *huge_pte_lock(struct hstate *h,
616 struct mm_struct *mm, pte_t *pte)
617{
618 spinlock_t *ptl;
619
620 ptl = huge_pte_lockptr(h, mm, pte);
621 spin_lock(ptl);
622 return ptl;
623}
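/*
 * Example (illustrative sketch, hypothetical caller): the usual pattern is
 * to look up the huge PTE, take the per-PTE lock with huge_pte_lock(), and
 * drop it with spin_unlock() when done:
 *
 *	pte_t *ptep = huge_pte_offset(mm, addr, huge_page_size(h));
 *	spinlock_t *ptl;
 *
 *	if (ptep) {
 *		ptl = huge_pte_lock(h, mm, ptep);
 *		... operate on the mapping ...
 *		spin_unlock(ptl);
 *	}
 */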
624
625#endif /* _LINUX_HUGETLB_H */