/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include <asm/cacheflush.h>

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif
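
/*
 * Example (illustrative sketch, not part of this header): on
 * architectures with aliasing caches, a caller doing I/O through a
 * vmap() alias of pages must keep the alias coherent with the physical
 * pages. The device I/O helpers named below are hypothetical.
 */
#if 0
static void example_vmap_io(void *vmap_addr, int size)
{
	/* Push dirty lines in the vmap alias out to the physical pages
	 * before the device reads them. */
	flush_kernel_vmap_range(vmap_addr, size);
	hypothetical_device_write(vmap_addr, size);

	/* Drop stale (possibly speculatively filled) lines before the
	 * CPU reads data the device wrote to the physical pages. */
	invalidate_kernel_vmap_range(vmap_addr, size);
	hypothetical_device_read(vmap_addr, size);
}
#endif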

#include <asm/kmap_types.h>

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

struct page *kmap_to_page(void *addr);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

static inline struct page *kmap_to_page(void *addr)
{
	return virt_to_page(addr);
}

#define totalhigh_pages 0UL

#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap(struct page *page)
{
}

static inline void *kmap_atomic(struct page *page)
{
	preempt_disable();
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, prot)	kmap_atomic(page)

static inline void __kunmap_atomic(void *addr)
{
	pagefault_enable();
	preempt_enable();
}

#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))

#define kmap_flush_unused()	do {} while(0)
#endif

#endif /* CONFIG_HIGHMEM */
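
/*
 * Example (illustrative sketch, not part of this header): kmap() may
 * sleep, so it suits process context where the mapping must stay live
 * across operations that can schedule. The function is hypothetical.
 */
#if 0
static u32 example_sum_page(struct page *page)
{
	u8 *kaddr = kmap(page);	/* may sleep; process context only */
	u32 sum = 0;
	size_t i;

	for (i = 0; i < PAGE_SIZE; i++)
		sum += kaddr[i];
	kunmap(page);		/* kunmap() takes the page, not the address */
	return sum;
}
#endif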

#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)

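/*
 * Per-CPU stack of in-use atomic kmap slots. Architectures that build
 * kmap_atomic() on fixmap slots use these helpers to claim the next
 * free slot (push) and release the most recently claimed one (pop),
 * which is why nested atomic kmaps must be released in LIFO order.
 */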
DECLARE_PER_CPU(int, __kmap_atomic_idx);

static inline int kmap_atomic_idx_push(void)
{
	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;

#ifdef CONFIG_DEBUG_HIGHMEM
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	BUG_ON(idx >= KM_TYPE_NR);
#endif
	return idx;
}

static inline int kmap_atomic_idx(void)
{
	return __this_cpu_read(__kmap_atomic_idx) - 1;
}

static inline void kmap_atomic_idx_pop(void)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	int idx = __this_cpu_dec_return(__kmap_atomic_idx);

	BUG_ON(idx < 0);
#else
	__this_cpu_dec(__kmap_atomic_idx);
#endif
}

#endif

/*
 * Prevent people trying to call kunmap_atomic() as if it were kunmap():
 * kunmap_atomic() should get the return value of kmap_atomic(), not the page.
 */
#define kunmap_atomic(addr)					\
do {								\
	BUILD_BUG_ON(__same_type((addr), struct page *));	\
	__kunmap_atomic(addr);					\
} while (0)
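
/*
 * Example (illustrative sketch, not part of this header): the window
 * between kmap_atomic() and kunmap_atomic() runs with pagefaults and
 * preemption disabled, so it must not sleep; nested mappings must be
 * released in reverse order. The function name is hypothetical.
 */
#if 0
static void example_write_to_page(struct page *page, size_t offset,
				  const char *src, size_t len)
{
	char *kaddr = kmap_atomic(page);

	memcpy(kaddr + offset, src, len);	/* must not sleep in here */
	kunmap_atomic(kaddr);			/* pass the address, not the page */
	flush_dcache_page(page);		/* keep aliasing caches coherent */
}
#endif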


/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move, such as __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function allocates a page for a VMA; the caller is expected to
 * specify via @movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing its own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function allocates a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
					unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}
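
/*
 * Example (illustrative sketch, not part of this header): an anonymous
 * fault path allocating a zeroed, movable page for userspace, in the
 * spirit of do_anonymous_page(). The function name is hypothetical.
 */
#if 0
static struct page *example_anon_fault_page(struct vm_area_struct *vma,
					    unsigned long address)
{
	struct page *page = alloc_zeroed_user_highpage_movable(vma, address);

	if (!page)
		return NULL;	/* caller typically returns VM_FAULT_OOM */
	return page;		/* already zeroed and ready to map */
}
#endif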

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page);
	clear_page(kaddr);
	kunmap_atomic(kaddr);
}

static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page);

	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr);
	flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}
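
/*
 * Example (illustrative sketch, not part of this header): zeroing the
 * unwritten tail of a page after a short write, as filesystems commonly
 * do before marking the page uptodate. Names are hypothetical.
 */
#if 0
static void example_zero_tail(struct page *page, unsigned bytes_written)
{
	if (bytes_written < PAGE_SIZE)
		zero_user_segment(page, bytes_written, PAGE_SIZE);
}
#endif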

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif

#ifndef __HAVE_ARCH_COPY_HIGHPAGE

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_page(vto, vfrom);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif
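
/*
 * Example (illustrative sketch, not part of this header): a
 * copy-on-write style helper duplicating a source page into a freshly
 * allocated destination, in the spirit of the write-protect fault path.
 * The function name is hypothetical.
 */
#if 0
static struct page *example_cow_copy(struct page *src,
				     struct vm_area_struct *vma,
				     unsigned long address)
{
	struct page *dst = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);

	if (dst)
		copy_user_highpage(dst, src, address, vma);
	return dst;
}
#endif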

#endif /* _LINUX_HIGHMEM_H */