/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

struct pagevec;

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
};

/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	filemap_set_wb_err(mapping, error);

	/* Record it in flags for now, for legacy callers */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}
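
/*
 * Illustrative sketch only (the callback name and surrounding code are
 * hypothetical): a filesystem's writeback completion path would typically
 * record a failed write before ending writeback on the page:
 *
 *	static void example_end_writeback(struct page *page, int err)
 *	{
 *		if (unlikely(err))
 *			mapping_set_error(page->mapping, err);
 *		end_page_writeback(page);
 *	}
 */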

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}
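
/*
 * Illustrative sketch only (example_init_inode and the chosen mask are
 * hypothetical): a filesystem that must avoid fs recursion from page
 * cache allocations might restrict the mapping's mask at inode setup,
 * and call sites then combine their own flags with it:
 *
 *	static void example_init_inode(struct inode *inode)
 *	{
 *		mapping_set_gfp_mask(inode->i_mapping,
 *				     GFP_NOFS | __GFP_HIGHMEM);
 *	}
 *
 *	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
 */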

void release_pages(struct page **pages, int nr);

/*
 * Speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to look up the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _refcount (eg. reclaim) has the
 * following (with the i_pages lock held):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using a lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion aren't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_inc(page);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}
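
/*
 * A minimal sketch of the three-step lookup protocol described above
 * (illustrative only - the real find_get_page() lives in mm/filemap.c
 * and also handles exceptional radix-tree entries):
 *
 *	static struct page *example_lockless_lookup(
 *			struct address_space *mapping, pgoff_t index)
 *	{
 *		struct page *page;
 *
 *		rcu_read_lock();
 *	repeat:
 *		page = radix_tree_lookup(&mapping->i_pages, index);	// 1
 *		if (page) {
 *			if (!page_cache_get_speculative(page))		// 2
 *				goto repeat;
 *			if (page != radix_tree_lookup(&mapping->i_pages,
 *						      index)) {		// 3
 *				put_page(page);
 *				goto repeat;
 *			}
 *		}
 *		rcu_read_unlock();
 *		return page;
 *	}
 */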

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_add(page, count);

#else
	if (unlikely(!page_ref_add_unless(page, count, 0)))
		return 0;
#endif
	VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

	return 1;
}

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001	/* mark the page accessed */
#define FGP_LOCK		0x00000002	/* return the page locked */
#define FGP_CREAT		0x00000004	/* allocate the page if absent */
#define FGP_WRITE		0x00000008	/* page will be written to */
#define FGP_NOFS		0x00000010	/* clear __GFP_FS for allocation */
#define FGP_NOWAIT		0x00000020	/* don't block on the page lock */

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
		int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}
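
/*
 * Typical usage (sketch): a caller that only inspects the page holds the
 * returned reference for the duration of the access and then drops it:
 *
 *	struct page *page = find_get_page(mapping, index);
 *
 *	if (page) {
 *		... examine the page ...
 *		put_page(page);
 *	}
 */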

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t offset, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, offset,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}
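
/*
 * Typical usage (sketch): the returned page is locked, so the caller must
 * unlock it and drop the reference when done:
 *
 *	struct page *page = find_or_create_page(mapping, index, GFP_KERNEL);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	... fill or modify the page ...
 *	unlock_page(page);
 *	put_page(page);
 */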

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
			pgoff_t end, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages(struct address_space *mapping,
			pgoff_t *start, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
				    pages);
}
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
			pgoff_t end, int tag, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages_tag(struct address_space *mapping,
			pgoff_t *index, int tag, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
					nr_pages, pages);
}
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
			int tag, unsigned int nr_entries,
			struct page **entries, pgoff_t *indices);
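
/*
 * Batched lookup sketch (illustrative only): find_get_pages() advances
 * *start past the last page returned, so a range can be walked in
 * fixed-size batches; each returned page carries a reference that the
 * caller must drop:
 *
 *	struct page *pages[16];
 *	pgoff_t index = 0;
 *	unsigned int i, nr;
 *
 *	while ((nr = find_get_pages(mapping, &index,
 *				    ARRAY_SIZE(pages), pages))) {
 *		for (i = 0; i < nr; i++) {
 *			... process pages[i] ...
 *			put_page(pages[i]);
 *		}
 *	}
 */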

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
								pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}
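
/*
 * Typical usage (sketch): read_mapping_page() returns an ERR_PTR() on
 * failure rather than NULL, so callers check with IS_ERR():
 *
 *	struct page *page = read_mapping_page(mapping, index, NULL);
 *
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	... use the uptodate page ...
 *	put_page(page);
 */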

/*
 * Get index of the page within radix-tree (but not for hugetlb pages).
 * (TODO: remove once hugetlb pages have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_index(struct page *page)
{
	pgoff_t pgoff;

	if (likely(!PageTransTail(page)))
		return page->index;

	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * head page
	 */
	pgoff = compound_head(page)->index;
	pgoff += page - compound_head(page);
	return pgoff;
}

extern pgoff_t hugetlb_basepage_index(struct page *page);

/*
 * Get the offset in PAGE_SIZE (even for hugetlb pages).
 * (TODO: hugetlb pages should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHuge(page)))
		return hugetlb_basepage_index(page);
	return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}
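
/*
 * Worked example (values are hypothetical): for a VMA mapping file
 * offset 0x10000 (vm_pgoff = 0x10 with 4K pages) at vm_start =
 * 0x7f0000000000, an address of 0x7f0000003000 gives
 *
 *	pgoff = (0x3000 >> PAGE_SHIFT) + 0x10 = 0x3 + 0x10 = 0x13,
 *
 * i.e. page 0x13 of the backing object.
 */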

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline int trylock_page(struct page *page)
{
	page = compound_head(page);
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}
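
/*
 * Typical usage (sketch): callers that may block for a long time prefer
 * the killable variant, propagating -EINTR if the task is fatally
 * signalled while waiting:
 *
 *	int err = lock_page_killable(page);
 *
 *	if (err)
 *		return err;
 *	... operate on the locked page ...
 *	unlock_page(page);
 */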

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
 * and should not be used directly.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(compound_head(page), PG_locked);
}

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (!PageLocked(page))
		return 0;
	return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/*
 * Add an arbitrary waiter to a page's wait queue.
 */
extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);

/*
 * Fault in all pages of the given userspace address range.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;
	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	do {
		if (unlikely(__put_user(0, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		return __put_user(0, end);

	return 0;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;

	do {
		if (unlikely(__get_user(c, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		return __get_user(c, end);
	}

	(void)c;
	return 0;
}
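
/*
 * Typical usage (sketch): the buffered write path faults the source
 * buffer in before taking page locks, so that the later atomic copy
 * from userspace cannot recurse into a page fault while a page is
 * held locked:
 *
 *	if (unlikely(fault_in_pages_readable(buf, bytes)))
 *		return -EFAULT;
 *	page = grab_cache_page_write_begin(mapping, index, flags);
 *	...
 */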

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
void delete_from_page_cache_batch(struct address_space *mapping,
				  struct pagevec *pvec);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__ClearPageLocked(page);
	return error;
}
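
/*
 * Typical usage (sketch): pair with __page_cache_alloc(); on success the
 * page is in the cache and locked, on failure the caller still holds the
 * only reference and must drop it:
 *
 *	struct page *page = __page_cache_alloc(gfp);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	err = add_to_page_cache(page, mapping, index, gfp);
 *	if (err) {
 *		put_page(page);
 *		return err;
 *	}
 *	... page is locked and in the page cache ...
 */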

static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}

#endif /* _LINUX_PAGEMAP_H */