1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_KHUGEPAGED_H
3 #define _LINUX_KHUGEPAGED_H
4
5 #include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */
6
7
8 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
9 extern struct attribute_group khugepaged_attr_group;
10
11 extern int khugepaged_init(void);
12 extern void khugepaged_destroy(void);
13 extern int start_stop_khugepaged(void);
14 extern int __khugepaged_enter(struct mm_struct *mm);
15 extern void __khugepaged_exit(struct mm_struct *mm);
16 extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
17 unsigned long vm_flags);
18 extern void khugepaged_min_free_kbytes_update(void);
19 #ifdef CONFIG_SHMEM
20 extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr);
21 #else
/* !CONFIG_SHMEM: file/shmem THP collapse is unavailable, so this is a no-op. */
static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
					   unsigned long addr)
{
}
26 #endif
27
/*
 * Predicates over transparent_hugepage_flags (see huge_mm.h):
 *
 * khugepaged_enabled()  - THP is enabled either system-wide ("always") or
 *                         for madvise(MADV_HUGEPAGE) regions ("madvise"),
 *                         i.e. khugepaged has any work to do at all.
 * khugepaged_always()   - THP enabled for all eligible mappings.
 * khugepaged_req_madv() - THP only for mappings that requested it via
 *                         madvise(MADV_HUGEPAGE).
 * khugepaged_defrag()   - khugepaged may enter direct reclaim/compaction
 *                         when allocating a huge page.
 */
#define khugepaged_enabled()					       \
	(transparent_hugepage_flags &				       \
	 ((1<<TRANSPARENT_HUGEPAGE_FLAG) |		       \
	  (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
#define khugepaged_always()				\
	(transparent_hugepage_flags &			\
	 (1<<TRANSPARENT_HUGEPAGE_FLAG))
#define khugepaged_req_madv()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
#define khugepaged_defrag()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
41
khugepaged_fork(struct mm_struct * mm,struct mm_struct * oldmm)42 static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
43 {
44 if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
45 return __khugepaged_enter(mm);
46 return 0;
47 }
48
khugepaged_exit(struct mm_struct * mm)49 static inline void khugepaged_exit(struct mm_struct *mm)
50 {
51 if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
52 __khugepaged_exit(mm);
53 }
54
khugepaged_enter(struct vm_area_struct * vma,unsigned long vm_flags)55 static inline int khugepaged_enter(struct vm_area_struct *vma,
56 unsigned long vm_flags)
57 {
58 if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
59 if ((khugepaged_always() ||
60 (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
61 !(vm_flags & VM_NOHUGEPAGE) &&
62 !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
63 if (__khugepaged_enter(vma->vm_mm))
64 return -ENOMEM;
65 return 0;
66 }
67 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* !CONFIG_TRANSPARENT_HUGEPAGE: no khugepaged, fork hook always succeeds. */
static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}
/* !CONFIG_TRANSPARENT_HUGEPAGE: nothing to unregister on mm teardown. */
static inline void khugepaged_exit(struct mm_struct *mm)
{
}
/* !CONFIG_TRANSPARENT_HUGEPAGE: registration is a no-op, reports success. */
static inline int khugepaged_enter(struct vm_area_struct *vma,
				   unsigned long vm_flags)
{
	return 0;
}
/* !CONFIG_TRANSPARENT_HUGEPAGE: vma-merge registration no-op, success. */
static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
					     unsigned long vm_flags)
{
	return 0;
}
/* !CONFIG_TRANSPARENT_HUGEPAGE: no THP collapse work to do. */
static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
					   unsigned long addr)
{
}
89
/* !CONFIG_TRANSPARENT_HUGEPAGE: min_free_kbytes tracking not needed. */
static inline void khugepaged_min_free_kbytes_update(void)
{
}
93 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
94
95 #endif /* _LINUX_KHUGEPAGED_H */
96