/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KHUGEPAGED_H
#define _LINUX_KHUGEPAGED_H

#include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */


#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern struct attribute_group khugepaged_attr_group;

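/*
 * Daemon setup/teardown and per-mm (un)registration; the out-of-line
 * implementations live in mm/khugepaged.c.
 */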
extern int khugepaged_init(void);
extern void khugepaged_destroy(void);
extern int start_stop_khugepaged(void);
extern int __khugepaged_enter(struct mm_struct *mm);
extern void __khugepaged_exit(struct mm_struct *mm);
extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
				      unsigned long vm_flags);
extern void khugepaged_min_free_kbytes_update(void);

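/*
 * Tests against transparent_hugepage_flags: khugepaged is relevant when THP
 * is enabled system-wide ("always") or for madvise(MADV_HUGEPAGE) regions
 * ("madvise").  khugepaged_defrag() reports whether khugepaged may stall for
 * reclaim/compaction when allocating a huge page.
 */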
#define khugepaged_enabled()					       \
	(transparent_hugepage_flags &				       \
	 ((1<<TRANSPARENT_HUGEPAGE_FLAG) |		       \
	  (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
#define khugepaged_always()				\
	(transparent_hugepage_flags &			\
	 (1<<TRANSPARENT_HUGEPAGE_FLAG))
#define khugepaged_req_madv()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
#define khugepaged_defrag()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))

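/* At fork, register the new mm with khugepaged if the parent mm was tracked. */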
static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
		return __khugepaged_enter(mm);
	return 0;
}

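/* On address-space teardown, drop the mm from khugepaged's scan list. */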
static inline void khugepaged_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
		__khugepaged_exit(mm);
}

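/*
 * Register the mm once a VMA becomes eligible for collapse: THP must be
 * enabled ("always", or "madvise" with VM_HUGEPAGE set on the VMA), and
 * neither VM_NOHUGEPAGE nor MMF_DISABLE_THP may forbid huge pages.
 * Returns -ENOMEM if registration fails.
 */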
static inline int khugepaged_enter(struct vm_area_struct *vma,
				   unsigned long vm_flags)
{
	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
		if ((khugepaged_always() ||
		     (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
		    !(vm_flags & VM_NOHUGEPAGE) &&
		    !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
			if (__khugepaged_enter(vma->vm_mm))
				return -ENOMEM;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
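/* No-op stubs when transparent hugepages are compiled out. */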
static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}
static inline void khugepaged_exit(struct mm_struct *mm)
{
}
static inline int khugepaged_enter(struct vm_area_struct *vma,
				   unsigned long vm_flags)
{
	return 0;
}
static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
					     unsigned long vm_flags)
{
	return 0;
}

static inline void khugepaged_min_free_kbytes_update(void)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_KHUGEPAGED_H */