/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KHUGEPAGED_H
#define _LINUX_KHUGEPAGED_H

#include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */


#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern struct attribute_group khugepaged_attr_group;

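/*
 * Lifecycle and control entry points, implemented in mm/khugepaged.c.
 * The double-underscore variants are the slow paths; callers normally
 * use the inline wrappers below, which test MMF_VM_HUGEPAGE first.
 */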
extern int khugepaged_init(void);
extern void khugepaged_destroy(void);
extern int start_stop_khugepaged(void);
extern int __khugepaged_enter(struct mm_struct *mm);
extern void __khugepaged_exit(struct mm_struct *mm);
extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
				      unsigned long vm_flags);
#ifdef CONFIG_SHMEM
extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr);
#else
static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
					   unsigned long addr)
{
}
#endif

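/*
 * Mode tests against transparent_hugepage_flags, which is set through
 * the transparent_hugepage sysfs interface: khugepaged_enabled() is
 * true when THP is enabled system-wide ("always") or only for
 * madvise(MADV_HUGEPAGE) regions ("madvise"); khugepaged_defrag()
 * reports whether khugepaged may enter direct reclaim/compaction when
 * allocating a huge page.
 */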
#define khugepaged_enabled()					       \
	(transparent_hugepage_flags &				       \
	 ((1<<TRANSPARENT_HUGEPAGE_FLAG) |			       \
	  (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
#define khugepaged_always()				\
	(transparent_hugepage_flags &			\
	 (1<<TRANSPARENT_HUGEPAGE_FLAG))
#define khugepaged_req_madv()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
#define khugepaged_defrag()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))

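/*
 * Called from fork: if the parent mm is registered with khugepaged
 * (MMF_VM_HUGEPAGE set), register the child mm as well.
 */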
static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
		return __khugepaged_enter(mm);
	return 0;
}

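/*
 * Called on mm teardown: drop the mm from khugepaged's scan list if it
 * was registered.
 */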
static inline void khugepaged_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
		__khugepaged_exit(mm);
}

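/*
 * Register vma->vm_mm with khugepaged if the VMA is eligible for
 * collapse: THP must be enabled globally ("always") or requested via
 * madvise(MADV_HUGEPAGE), and neither VM_NOHUGEPAGE on the VMA nor
 * MMF_DISABLE_THP on the mm may be set.
 */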
static inline int khugepaged_enter(struct vm_area_struct *vma,
				   unsigned long vm_flags)
{
	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
		if ((khugepaged_always() ||
		     (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
		    !(vm_flags & VM_NOHUGEPAGE) &&
		    !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
			if (__khugepaged_enter(vma->vm_mm))
				return -ENOMEM;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
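/* THP compiled out: every hook is a no-op so callers need no #ifdefs. */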
static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}
static inline void khugepaged_exit(struct mm_struct *mm)
{
}
static inline int khugepaged_enter(struct vm_area_struct *vma,
				   unsigned long vm_flags)
{
	return 0;
}
static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
					     unsigned long vm_flags)
{
	return 0;
}
static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
					   unsigned long addr)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_KHUGEPAGED_H */