/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <asm/pgtable.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd)	(0)
#define __hugepd(x)		((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both alloced and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
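
/*
 * Illustrative sketch (not part of the API contract): walking every
 * registered huge page size with for_each_hstate(). Assumes a context,
 * such as a late initcall, where hstates has already been populated.
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("%s: order %u\n", h->name, huge_page_order(h));
 */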

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
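
/*
 * Minimal usage sketch, mirroring what hugetlbfs does at mount time:
 * create a subpool against the default hstate, then drop the reference
 * when the filesystem goes away. Error handling is elided; max_hpages
 * and min_hpages are placeholders for the mount options.
 *
 *	struct hugepage_subpool *spool;
 *
 *	spool = hugepage_new_subpool(&default_hstate, max_hpages, min_hpages);
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);
 */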

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				struct page **pagep);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
			  struct vm_area_struct *vma,
			  vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
			     long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
				pgoff_t idx, unsigned long address);
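
/*
 * Usage sketch: the fault mutex table serializes faults on the same file
 * offset. The pattern below follows the fault and mcopy paths; mapping
 * and idx stand in for the backing address_space and huge page index.
 *
 *	u32 hash = hugetlb_fault_mutex_hash(h, mapping, idx, address);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */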

pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
					pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)	0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pd(vma, addr, hpd, flags, pdshift)	NULL
#define follow_huge_pmd(mm, addr, pmd, flags)	NULL
#define follow_huge_pud(mm, addr, pud, flags)	NULL
#define follow_huge_pgd(mm, addr, pgd, flags)	NULL
#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
#define pmd_huge(x)	0
#define pud_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling)	({BUG(); 0; })
#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
				src_addr, pagep)	({ BUG(); 0; })
#define huge_pte_offset(mm, address, sz)	0

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}
#define putback_active_hugepage(p)	do {} while (0)
#define move_hugetlb_state(old, new, reason)	do {} while (0)

static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
			unsigned long address, unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}
static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at the page global directory. If an arch supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE	"anon_hugepage"

enum {
	/*
	 * The file will be used as a shm file, so shmfs accounting rules
	 * apply.
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;	/* inodes allowed */
	long	free_inodes;	/* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}


#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files[5];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
				     int nid, nodemask_t *nmask);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_bad_size(void);
void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}
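
/*
 * Worked example (assumes a 2 MB hstate has been registered): a
 * page_size_log of 21, as encoded by mmap()'s MAP_HUGE_2MB, selects the
 * hstate whose huge_page_size() is 1UL << 21, while a page_size_log of 0
 * falls back to the default hstate.
 *
 *	struct hstate *h = hstate_sizelog(21);	/- 2 MB hstate, or NULL -/
 */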

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
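
/*
 * Worked example for the helpers above, assuming 4 KB base pages
 * (PAGE_SHIFT == 12) and a 2 MB hstate (order == 9):
 *
 *	huge_page_size(h)	== 4096 << 9	== 2097152 (2 MB)
 *	huge_page_shift(h)	== 9 + 12	== 21
 *	pages_per_huge_page(h)	== 1 << 9	== 512
 *	blocks_per_huge_page(h)	== 2097152 / 512 == 4096 (512-byte sectors)
 */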

#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}
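
/*
 * Illustrative arithmetic (2 MB huge pages, 4 KB base pages): a huge page
 * at huge-page index 3 in its mapping covers base-page indices 3 * 512
 * through 3 * 512 + 511, so basepage_index() on its head page returns 1536.
 */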

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check is different from the migration check. It
 * determines whether a huge page should be placed in the movable zone.
 * Movability of a huge page needs to be considered only if its size is
 * supported for migration: there is no reason for a huge page to be
 * movable if it is not migratable to begin with. The huge page must also
 * be small enough that migrating it out of the movable zone remains
 * feasible; mere presence in the movable zone does not make migration
 * feasible.
 *
 * So even though large huge page sizes like the gigantic ones are
 * migratable, they should not be movable, because it is not feasible to
 * migrate them from the movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot time.
 * Some of them, such as powerpc, set HPAGE_SHIFT to 0 when there is
 * no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif
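
/*
 * Hedged usage sketch: callers typically bail out early when huge pages
 * were disabled at boot, e.g. in an init or reporting routine:
 *
 *	if (!hugepages_supported())
 *		return;
 */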

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif
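
/*
 * Sketch of the start/commit protocol used by hugetlb_change_protection()
 * (simplified; the real caller also handles migration entries and holds
 * the page table lock):
 *
 *	pte_t old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	pte_t new_pte = huge_pte_modify(old_pte, newprot);
 *
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
 */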

#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
					   unsigned long addr,
					   int avoid_reserve)
{
	return NULL;
}

static inline struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
	return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask)
{
	return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
					       struct vm_area_struct *vma,
					       unsigned long address)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif /* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
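
/*
 * Typical lock/unlock pairing, as used by the hugetlb fault and page walk
 * paths (illustrative; error paths elided):
 *
 *	spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *
 *	... inspect or update the huge PTE ...
 *	spin_unlock(ptl);
 */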

#endif /* _LINUX_HUGETLB_H */