1 /*
2  * fs/dax.c - Direct Access filesystem code
3  * Copyright (c) 2013-2014 Intel Corporation
4  * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
5  * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  */
16 
17 #include <linux/atomic.h>
18 #include <linux/blkdev.h>
19 #include <linux/buffer_head.h>
20 #include <linux/dax.h>
21 #include <linux/fs.h>
22 #include <linux/genhd.h>
23 #include <linux/highmem.h>
24 #include <linux/memcontrol.h>
25 #include <linux/mm.h>
26 #include <linux/mutex.h>
27 #include <linux/pagevec.h>
28 #include <linux/sched.h>
29 #include <linux/sched/signal.h>
30 #include <linux/uio.h>
31 #include <linux/vmstat.h>
32 #include <linux/pfn_t.h>
33 #include <linux/sizes.h>
34 #include <linux/mmu_notifier.h>
35 #include <linux/iomap.h>
36 #include "internal.h"
37 
38 #define CREATE_TRACE_POINTS
39 #include <trace/events/fs_dax.h>
40 
41 /* We choose 4096 entries - same as per-zone page wait tables */
42 #define DAX_WAIT_TABLE_BITS 12
43 #define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
44 
45 /* The 'colour' (i.e. the low bits) of a page offset within a PMD. */
46 #define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
47 #define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)
48 
49 static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
50 
51 static int __init init_dax_wait_table(void)
52 {
53 	int i;
54 
55 	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
56 		init_waitqueue_head(wait_table + i);
57 	return 0;
58 }
59 fs_initcall(init_dax_wait_table);
60 
61 /*
62  * We use the lowest available bit in an exceptional entry for locking, one bit for
63  * the entry size (PMD) and two more to tell us if the entry is a zero page or
64  * an empty entry that is just used for locking.  In total four special bits.
65  *
66  * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
67  * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
68  * block allocation.
69  */
70 #define RADIX_DAX_SHIFT		(RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
71 #define RADIX_DAX_ENTRY_LOCK	(1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
72 #define RADIX_DAX_PMD		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
73 #define RADIX_DAX_ZERO_PAGE	(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
74 #define RADIX_DAX_EMPTY		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))
75 
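/*
 * Helpers for encoding and decoding DAX radix tree entries: dax_radix_pfn()
 * extracts the pfn stored in the bits above RADIX_DAX_SHIFT,
 * dax_radix_locked_entry() builds a locked exceptional entry from a pfn and
 * flag bits, and dax_radix_order() returns the radix tree order of an entry.
 */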
76 static unsigned long dax_radix_pfn(void *entry)
77 {
78 	return (unsigned long)entry >> RADIX_DAX_SHIFT;
79 }
80 
81 static void *dax_radix_locked_entry(unsigned long pfn, unsigned long flags)
82 {
83 	return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
84 			(pfn << RADIX_DAX_SHIFT) | RADIX_DAX_ENTRY_LOCK);
85 }
86 
87 static unsigned int dax_radix_order(void *entry)
88 {
89 	if ((unsigned long)entry & RADIX_DAX_PMD)
90 		return PMD_SHIFT - PAGE_SHIFT;
91 	return 0;
92 }
93 
94 static int dax_is_pmd_entry(void *entry)
95 {
96 	return (unsigned long)entry & RADIX_DAX_PMD;
97 }
98 
99 static int dax_is_pte_entry(void *entry)
100 {
101 	return !((unsigned long)entry & RADIX_DAX_PMD);
102 }
103 
104 static int dax_is_zero_entry(void *entry)
105 {
106 	return (unsigned long)entry & RADIX_DAX_ZERO_PAGE;
107 }
108 
109 static int dax_is_empty_entry(void *entry)
110 {
111 	return (unsigned long)entry & RADIX_DAX_EMPTY;
112 }
113 
114 /*
115  * DAX radix tree locking
116  */
117 struct exceptional_entry_key {
118 	struct address_space *mapping;
119 	pgoff_t entry_start;
120 };
121 
122 struct wait_exceptional_entry_queue {
123 	wait_queue_entry_t wait;
124 	struct exceptional_entry_key key;
125 };
126 
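/*
 * Hash the mapping/index pair to one of the DAX wait queues and fill in
 * @key so that wake_exceptional_entry_func() can filter out wakeups meant
 * for other entries hashed to the same queue.
 */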
127 static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
128 		pgoff_t index, void *entry, struct exceptional_entry_key *key)
129 {
130 	unsigned long hash;
131 
132 	/*
133 	 * If 'entry' is a PMD, align the 'index' that we use for the wait
134 	 * queue to the start of that PMD.  This ensures that all offsets in
135 	 * the range covered by the PMD map to the same bit lock.
136 	 */
137 	if (dax_is_pmd_entry(entry))
138 		index &= ~PG_PMD_COLOUR;
139 
140 	key->mapping = mapping;
141 	key->entry_start = index;
142 
143 	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
144 	return wait_table + hash;
145 }
146 
147 static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mode,
148 				       int sync, void *keyp)
149 {
150 	struct exceptional_entry_key *key = keyp;
151 	struct wait_exceptional_entry_queue *ewait =
152 		container_of(wait, struct wait_exceptional_entry_queue, wait);
153 
154 	if (key->mapping != ewait->key.mapping ||
155 	    key->entry_start != ewait->key.entry_start)
156 		return 0;
157 	return autoremove_wake_function(wait, mode, sync, NULL);
158 }
159 
160 /*
161  * @entry may no longer be the entry at the index in the mapping.
162  * The important information it's conveying is whether the entry at
163  * this index used to be a PMD entry.
164  */
165 static void dax_wake_mapping_entry_waiter(struct address_space *mapping,
166 		pgoff_t index, void *entry, bool wake_all)
167 {
168 	struct exceptional_entry_key key;
169 	wait_queue_head_t *wq;
170 
171 	wq = dax_entry_waitqueue(mapping, index, entry, &key);
172 
173 	/*
174 	 * Checking for locked entry and prepare_to_wait_exclusive() happens
175 	 * under the i_pages lock, ditto for entry handling in our callers.
176 	 * So at this point all tasks that could have seen our entry locked
177 	 * must be in the waitqueue and the following check will see them.
178 	 */
179 	if (waitqueue_active(wq))
180 		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
181 }
182 
183 /*
184  * Check whether the given slot is locked.  Must be called with the i_pages
185  * lock held.
186  */
187 static inline int slot_locked(struct address_space *mapping, void **slot)
188 {
189 	unsigned long entry = (unsigned long)
190 		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
191 	return entry & RADIX_DAX_ENTRY_LOCK;
192 }
193 
194 /*
195  * Mark the given slot as locked.  Must be called with the i_pages lock held.
196  */
197 static inline void *lock_slot(struct address_space *mapping, void **slot)
198 {
199 	unsigned long entry = (unsigned long)
200 		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
201 
202 	entry |= RADIX_DAX_ENTRY_LOCK;
203 	radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry);
204 	return (void *)entry;
205 }
206 
207 /*
208  * Mark the given slot as unlocked.  Must be called with the i_pages lock held.
209  */
210 static inline void *unlock_slot(struct address_space *mapping, void **slot)
211 {
212 	unsigned long entry = (unsigned long)
213 		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
214 
215 	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
216 	radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry);
217 	return (void *)entry;
218 }
219 
220 static void put_unlocked_mapping_entry(struct address_space *mapping,
221 				       pgoff_t index, void *entry);
222 
223 /*
224  * Look up the entry at @index in the radix tree and, if it is a locked
225  * exceptional entry, wait for it to become unlocked before returning it.
226  * The caller must call put_unlocked_mapping_entry() if it decides not to
227  * lock the entry, or put_locked_mapping_entry() once it has locked the
228  * entry and later wants to unlock it.
229  *
230  * Must be called with the i_pages lock held.
231  */
232 static void *get_unlocked_mapping_entry(struct address_space *mapping,
233 		pgoff_t index, void ***slotp)
234 {
235 	void *entry, **slot;
236 	struct wait_exceptional_entry_queue ewait;
237 	wait_queue_head_t *wq;
238 
239 	init_wait(&ewait.wait);
240 	ewait.wait.func = wake_exceptional_entry_func;
241 
242 	for (;;) {
243 		entry = __radix_tree_lookup(&mapping->i_pages, index, NULL,
244 					  &slot);
245 		if (!entry ||
246 		    WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)) ||
247 		    !slot_locked(mapping, slot)) {
248 			if (slotp)
249 				*slotp = slot;
250 			return entry;
251 		}
252 
253 		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
254 		prepare_to_wait_exclusive(wq, &ewait.wait,
255 					  TASK_UNINTERRUPTIBLE);
256 		xa_unlock_irq(&mapping->i_pages);
257 		schedule();
258 		finish_wait(wq, &ewait.wait);
259 		xa_lock_irq(&mapping->i_pages);
260 	}
261 }
262 
263 /*
264  * The only thing keeping the address space around is the i_pages lock
265  * (it's cycled in clear_inode() after removing the entries from i_pages).
266  * After we call xa_unlock_irq(), we cannot touch mapping->i_pages.
267  */
268 static void wait_entry_unlocked(struct address_space *mapping, pgoff_t index,
269 		void ***slotp, void *entry)
270 {
271 	struct wait_exceptional_entry_queue ewait;
272 	wait_queue_head_t *wq;
273 
274 	init_wait(&ewait.wait);
275 	ewait.wait.func = wake_exceptional_entry_func;
276 
277 	wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
278 	/*
279 	 * Unlike get_unlocked_mapping_entry() there is no guarantee that this
280 	 * path ever successfully retrieves an unlocked entry before an
281 	 * inode dies. Perform a non-exclusive wait in case this path
282 	 * never successfully performs its own wake up.
283 	 */
284 	prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
285 	xa_unlock_irq(&mapping->i_pages);
286 	schedule();
287 	finish_wait(wq, &ewait.wait);
288 }
289 
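/*
 * Clear the lock bit of the entry at @index and wake up one waiter that
 * may be sleeping on the entry lock.
 */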
290 static void unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
291 {
292 	void *entry, **slot;
293 
294 	xa_lock_irq(&mapping->i_pages);
295 	entry = __radix_tree_lookup(&mapping->i_pages, index, NULL, &slot);
296 	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
297 			 !slot_locked(mapping, slot))) {
298 		xa_unlock_irq(&mapping->i_pages);
299 		return;
300 	}
301 	unlock_slot(mapping, slot);
302 	xa_unlock_irq(&mapping->i_pages);
303 	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
304 }
305 
306 static void put_locked_mapping_entry(struct address_space *mapping,
307 		pgoff_t index)
308 {
309 	unlock_mapping_entry(mapping, index);
310 }
311 
312 /*
313  * Called when we are done with radix tree entry we looked up via
314  * get_unlocked_mapping_entry() and which we didn't lock in the end.
315  */
316 static void put_unlocked_mapping_entry(struct address_space *mapping,
317 				       pgoff_t index, void *entry)
318 {
319 	if (!entry)
320 		return;
321 
322 	/* We have to wake up the next waiter for the radix tree entry lock */
323 	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
324 }
325 
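/*
 * Return the number of bytes of storage backing a DAX radix tree entry:
 * zero for zero-page and empty entries, PMD_SIZE for PMD entries and
 * PAGE_SIZE otherwise.
 */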
326 static unsigned long dax_entry_size(void *entry)
327 {
328 	if (dax_is_zero_entry(entry))
329 		return 0;
330 	else if (dax_is_empty_entry(entry))
331 		return 0;
332 	else if (dax_is_pmd_entry(entry))
333 		return PMD_SIZE;
334 	else
335 		return PAGE_SIZE;
336 }
337 
338 static unsigned long dax_radix_end_pfn(void *entry)
339 {
340 	return dax_radix_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
341 }
342 
343 /*
344  * Iterate through all mapped pfns represented by an entry, i.e. skip
345  * 'empty' and 'zero' entries.
346  */
347 #define for_each_mapped_pfn(entry, pfn) \
348 	for (pfn = dax_radix_pfn(entry); \
349 			pfn < dax_radix_end_pfn(entry); pfn++)
350 
351 /*
352  * TODO: for reflink+dax we need a way to associate a single page with
353  * multiple address_space instances at different linear_page_index()
354  * offsets.
355  */
356 static void dax_associate_entry(void *entry, struct address_space *mapping,
357 		struct vm_area_struct *vma, unsigned long address)
358 {
359 	unsigned long size = dax_entry_size(entry), pfn, index;
360 	int i = 0;
361 
362 	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
363 		return;
364 
365 	index = linear_page_index(vma, address & ~(size - 1));
366 	for_each_mapped_pfn(entry, pfn) {
367 		struct page *page = pfn_to_page(pfn);
368 
369 		WARN_ON_ONCE(page->mapping);
370 		page->mapping = mapping;
371 		page->index = index + i++;
372 	}
373 }
374 
375 static void dax_disassociate_entry(void *entry, struct address_space *mapping,
376 		bool trunc)
377 {
378 	unsigned long pfn;
379 
380 	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
381 		return;
382 
383 	for_each_mapped_pfn(entry, pfn) {
384 		struct page *page = pfn_to_page(pfn);
385 
386 		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
387 		WARN_ON_ONCE(page->mapping && page->mapping != mapping);
388 		page->mapping = NULL;
389 		page->index = 0;
390 	}
391 }
392 
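/*
 * Return the first page backing @entry that has an elevated reference
 * count (i.e. is pinned, for example by get_user_pages()), or NULL if
 * none of its pages are busy.
 */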
393 static struct page *dax_busy_page(void *entry)
394 {
395 	unsigned long pfn;
396 
397 	for_each_mapped_pfn(entry, pfn) {
398 		struct page *page = pfn_to_page(pfn);
399 
400 		if (page_ref_count(page) > 1)
401 			return page;
402 	}
403 	return NULL;
404 }
405 
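/*
 * Attempt to lock the DAX radix tree entry that maps @page, rechecking
 * page->mapping after each cycle of the i_pages lock in case the page was
 * truncated or reused under us. Returns true on success (device-dax
 * mappings need no entry lock and always succeed), false if the page is
 * no longer part of a DAX mapping or its entry has gone away.
 */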
406 bool dax_lock_mapping_entry(struct page *page)
407 {
408 	pgoff_t index;
409 	struct inode *inode;
410 	bool did_lock = false;
411 	void *entry = NULL, **slot;
412 	struct address_space *mapping;
413 
414 	rcu_read_lock();
415 	for (;;) {
416 		mapping = READ_ONCE(page->mapping);
417 
418 		if (!mapping || !dax_mapping(mapping))
419 			break;
420 
421 		/*
422 		 * In the device-dax case there's no need to lock, a
423 		 * struct dev_pagemap pin is sufficient to keep the
424 		 * inode alive, and we assume we have dev_pagemap pin
425 		 * otherwise we would not have a valid pfn_to_page()
426 		 * translation.
427 		 */
428 		inode = mapping->host;
429 		if (S_ISCHR(inode->i_mode)) {
430 			did_lock = true;
431 			break;
432 		}
433 
434 		xa_lock_irq(&mapping->i_pages);
435 		if (mapping != page->mapping) {
436 			xa_unlock_irq(&mapping->i_pages);
437 			continue;
438 		}
439 		index = page->index;
440 
441 		entry = __radix_tree_lookup(&mapping->i_pages, index,
442 						NULL, &slot);
443 		if (!entry) {
444 			xa_unlock_irq(&mapping->i_pages);
445 			break;
446 		} else if (slot_locked(mapping, slot)) {
447 			rcu_read_unlock();
448 			wait_entry_unlocked(mapping, index, &slot, entry);
449 			rcu_read_lock();
450 			continue;
451 		}
452 		lock_slot(mapping, slot);
453 		did_lock = true;
454 		xa_unlock_irq(&mapping->i_pages);
455 		break;
456 	}
457 	rcu_read_unlock();
458 
459 	return did_lock;
460 }
461 
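/*
 * Undo dax_lock_mapping_entry(). Device-dax mappings were never locked,
 * so there is nothing to do for them.
 */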
462 void dax_unlock_mapping_entry(struct page *page)
463 {
464 	struct address_space *mapping = page->mapping;
465 	struct inode *inode = mapping->host;
466 
467 	if (S_ISCHR(inode->i_mode))
468 		return;
469 
470 	unlock_mapping_entry(mapping, page->index);
471 }
472 
473 /*
474  * Find radix tree entry at given index. If it points to an exceptional entry,
475  * return it with the radix tree entry locked. If the radix tree doesn't
476  * contain given index, create an empty exceptional entry for the index and
477  * return with it locked.
478  *
479  * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
480  * either return that locked entry or will return an error.  This error will
481  * happen if there are any 4k entries within the 2MiB range that we are
482  * requesting.
483  *
484  * We always favor 4k entries over 2MiB entries. There isn't a flow where we
485  * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
486  * insertion will fail if it finds any 4k entries already in the tree, and a
487  * 4k insertion will cause an existing 2MiB entry to be unmapped and
488  * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
489  * well as 2MiB empty entries.
490  *
491  * The exception to this downgrade path is for 2MiB DAX PMD entries that have
492  * real storage backing them.  We will leave these real 2MiB DAX entries in
493  * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
494  *
495  * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
496  * persistent memory the benefit is doubtful. We can add that later if we can
497  * show it helps.
498  */
499 static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
500 		unsigned long size_flag)
501 {
502 	bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
503 	void *entry, **slot;
504 
505 restart:
506 	xa_lock_irq(&mapping->i_pages);
507 	entry = get_unlocked_mapping_entry(mapping, index, &slot);
508 
509 	if (WARN_ON_ONCE(entry && !radix_tree_exceptional_entry(entry))) {
510 		entry = ERR_PTR(-EIO);
511 		goto out_unlock;
512 	}
513 
514 	if (entry) {
515 		if (size_flag & RADIX_DAX_PMD) {
516 			if (dax_is_pte_entry(entry)) {
517 				put_unlocked_mapping_entry(mapping, index,
518 						entry);
519 				entry = ERR_PTR(-EEXIST);
520 				goto out_unlock;
521 			}
522 		} else { /* trying to grab a PTE entry */
523 			if (dax_is_pmd_entry(entry) &&
524 			    (dax_is_zero_entry(entry) ||
525 			     dax_is_empty_entry(entry))) {
526 				pmd_downgrade = true;
527 			}
528 		}
529 	}
530 
531 	/* No entry for given index? Make sure radix tree is big enough. */
532 	if (!entry || pmd_downgrade) {
533 		int err;
534 
535 		if (pmd_downgrade) {
536 			/*
537 			 * Make sure 'entry' remains valid while we drop
538 			 * the i_pages lock.
539 			 */
540 			entry = lock_slot(mapping, slot);
541 		}
542 
543 		xa_unlock_irq(&mapping->i_pages);
544 		/*
545 		 * Besides huge zero pages the only other thing that gets
546 		 * downgraded are empty entries which don't need to be
547 		 * unmapped.
548 		 */
549 		if (pmd_downgrade && dax_is_zero_entry(entry))
550 			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
551 							PG_PMD_NR, false);
552 
553 		err = radix_tree_preload(
554 				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
555 		if (err) {
556 			if (pmd_downgrade)
557 				put_locked_mapping_entry(mapping, index);
558 			return ERR_PTR(err);
559 		}
560 		xa_lock_irq(&mapping->i_pages);
561 
562 		if (!entry) {
563 			/*
564 			 * We needed to drop the i_pages lock while calling
565 			 * radix_tree_preload() and we didn't have an entry to
566 			 * lock.  See if another thread inserted an entry at
567 			 * our index during this time.
568 			 */
569 			entry = __radix_tree_lookup(&mapping->i_pages, index,
570 					NULL, &slot);
571 			if (entry) {
572 				radix_tree_preload_end();
573 				xa_unlock_irq(&mapping->i_pages);
574 				goto restart;
575 			}
576 		}
577 
578 		if (pmd_downgrade) {
579 			dax_disassociate_entry(entry, mapping, false);
580 			radix_tree_delete(&mapping->i_pages, index);
581 			mapping->nrexceptional--;
582 			dax_wake_mapping_entry_waiter(mapping, index, entry,
583 					true);
584 		}
585 
586 		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);
587 
588 		err = __radix_tree_insert(&mapping->i_pages, index,
589 				dax_radix_order(entry), entry);
590 		radix_tree_preload_end();
591 		if (err) {
592 			xa_unlock_irq(&mapping->i_pages);
593 			/*
594 			 * Our insertion of a DAX entry failed, most likely
595 			 * because we were inserting a PMD entry and it
596 			 * collided with a PTE sized entry at a different
597 			 * index in the PMD range.  We haven't inserted
598 			 * anything into the radix tree and have no waiters to
599 			 * wake.
600 			 */
601 			return ERR_PTR(err);
602 		}
603 		/* Good, we have inserted empty locked entry into the tree. */
604 		mapping->nrexceptional++;
605 		xa_unlock_irq(&mapping->i_pages);
606 		return entry;
607 	}
608 	entry = lock_slot(mapping, slot);
609  out_unlock:
610 	xa_unlock_irq(&mapping->i_pages);
611 	return entry;
612 }
613 
614 /**
615  * dax_layout_busy_page - find first pinned page in @mapping
616  * @mapping: address space to scan for a page with ref count > 1
617  *
618  * DAX requires ZONE_DEVICE mapped pages. These pages are never
619  * 'onlined' to the page allocator so they are considered idle when
620  * page->count == 1. A filesystem uses this interface to determine if
621  * any page in the mapping is busy, i.e. for DMA, or other
622  * get_user_pages() usages.
623  *
624  * It is expected that the filesystem is holding locks to block the
625  * establishment of new mappings in this address_space. I.e. it expects
626  * to be able to run unmap_mapping_range() and subsequently not race
627  * mapping_mapped() becoming true.
628  */
629 struct page *dax_layout_busy_page(struct address_space *mapping)
630 {
631 	pgoff_t	indices[PAGEVEC_SIZE];
632 	struct page *page = NULL;
633 	struct pagevec pvec;
634 	pgoff_t	index, end;
635 	unsigned i;
636 
637 	/*
638 	 * In the 'limited' case get_user_pages() for dax is disabled.
639 	 */
640 	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
641 		return NULL;
642 
643 	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
644 		return NULL;
645 
646 	pagevec_init(&pvec);
647 	index = 0;
648 	end = -1;
649 
650 	/*
651 	 * If we race get_user_pages_fast() here either we'll see the
652 	 * elevated page count in the pagevec_lookup and wait, or
653 	 * get_user_pages_fast() will see that the page it took a reference
654 	 * against is no longer mapped in the page tables and bail to the
655 	 * get_user_pages() slow path.  The slow path is protected by
656 	 * pte_lock() and pmd_lock(). New references are not taken without
657 	 * holding those locks, and unmap_mapping_range() will not zero the
658 	 * pte or pmd without holding the respective lock, so we are
659 	 * guaranteed to either see new references or prevent new
660 	 * references from being established.
661 	 */
662 	unmap_mapping_range(mapping, 0, 0, 0);
663 
664 	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
665 				min(end - index, (pgoff_t)PAGEVEC_SIZE),
666 				indices)) {
667 		pgoff_t nr_pages = 1;
668 
669 		for (i = 0; i < pagevec_count(&pvec); i++) {
670 			struct page *pvec_ent = pvec.pages[i];
671 			void *entry;
672 
673 			index = indices[i];
674 			if (index >= end)
675 				break;
676 
677 			if (WARN_ON_ONCE(
678 			     !radix_tree_exceptional_entry(pvec_ent)))
679 				continue;
680 
681 			xa_lock_irq(&mapping->i_pages);
682 			entry = get_unlocked_mapping_entry(mapping, index, NULL);
683 			if (entry) {
684 				page = dax_busy_page(entry);
685 				/*
686 				 * Account for multi-order entries at
687 				 * the end of the pagevec.
688 				 */
689 				if (i + 1 >= pagevec_count(&pvec))
690 					nr_pages = 1UL << dax_radix_order(entry);
691 			}
692 			put_unlocked_mapping_entry(mapping, index, entry);
693 			xa_unlock_irq(&mapping->i_pages);
694 			if (page)
695 				break;
696 		}
697 
698 		/*
699 		 * We don't expect normal struct page entries to exist in our
700 		 * tree, but we keep these pagevec calls so that this code is
701 		 * consistent with the common pattern for handling pagevecs
702 		 * throughout the kernel.
703 		 */
704 		pagevec_remove_exceptionals(&pvec);
705 		pagevec_release(&pvec);
706 		index += nr_pages;
707 
708 		if (page)
709 			break;
710 	}
711 	return page;
712 }
713 EXPORT_SYMBOL_GPL(dax_layout_busy_page);
714 
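/*
 * Common helper for dax_delete_mapping_entry() and
 * dax_invalidate_mapping_entry_sync(): remove the entry at @index from the
 * radix tree. When @trunc is false the entry is left alone if it is still
 * tagged dirty or towrite. Returns 1 if the entry was removed, 0 otherwise.
 */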
715 static int __dax_invalidate_mapping_entry(struct address_space *mapping,
716 					  pgoff_t index, bool trunc)
717 {
718 	int ret = 0;
719 	void *entry;
720 	struct radix_tree_root *pages = &mapping->i_pages;
721 
722 	xa_lock_irq(pages);
723 	entry = get_unlocked_mapping_entry(mapping, index, NULL);
724 	if (!entry || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)))
725 		goto out;
726 	if (!trunc &&
727 	    (radix_tree_tag_get(pages, index, PAGECACHE_TAG_DIRTY) ||
728 	     radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE)))
729 		goto out;
730 	dax_disassociate_entry(entry, mapping, trunc);
731 	radix_tree_delete(pages, index);
732 	mapping->nrexceptional--;
733 	ret = 1;
734 out:
735 	put_unlocked_mapping_entry(mapping, index, entry);
736 	xa_unlock_irq(pages);
737 	return ret;
738 }
739 /*
740  * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
741  * entry to get unlocked before deleting it.
742  */
743 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
744 {
745 	int ret = __dax_invalidate_mapping_entry(mapping, index, true);
746 
747 	/*
748 	 * This gets called from the truncate / punch_hole path. As such, the
749 	 * caller must hold locks protecting against concurrent modifications of
750 	 * the radix tree (usually fs-private i_mmap_sem for writing). Since the
751 	 * caller has seen an exceptional entry for this index, we had better
752 	 * find it at that index as well...
753 	 */
754 	WARN_ON_ONCE(!ret);
755 	return ret;
756 }
757 
758 /*
759  * Invalidate exceptional DAX entry if it is clean.
760  */
761 int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
762 				      pgoff_t index)
763 {
764 	return __dax_invalidate_mapping_entry(mapping, index, false);
765 }
766 
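/*
 * Copy one page worth of data from the DAX storage backing @sector into
 * @to. Used by the vmf->cow_page path of the fault handlers, where the
 * fault needs a private copy of the file data.
 */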
767 static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
768 		sector_t sector, size_t size, struct page *to,
769 		unsigned long vaddr)
770 {
771 	void *vto, *kaddr;
772 	pgoff_t pgoff;
773 	long rc;
774 	int id;
775 
776 	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
777 	if (rc)
778 		return rc;
779 
780 	id = dax_read_lock();
781 	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, NULL);
782 	if (rc < 0) {
783 		dax_read_unlock(id);
784 		return rc;
785 	}
786 	vto = kmap_atomic(to);
787 	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
788 	kunmap_atomic(vto);
789 	dax_read_unlock(id);
790 	return 0;
791 }
792 
793 /*
794  * By this point grab_mapping_entry() has ensured that we have a locked entry
795  * of the appropriate size so we don't have to worry about downgrading PMDs to
796  * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
797  * already in the tree, we will skip the insertion and just dirty the PMD as
798  * appropriate.
799  */
800 static void *dax_insert_mapping_entry(struct address_space *mapping,
801 				      struct vm_fault *vmf,
802 				      void *entry, pfn_t pfn_t,
803 				      unsigned long flags, bool dirty)
804 {
805 	struct radix_tree_root *pages = &mapping->i_pages;
806 	unsigned long pfn = pfn_t_to_pfn(pfn_t);
807 	pgoff_t index = vmf->pgoff;
808 	void *new_entry;
809 
810 	if (dirty)
811 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
812 
813 	if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) {
814 		/* we are replacing a zero page with block mapping */
815 		if (dax_is_pmd_entry(entry))
816 			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
817 							PG_PMD_NR, false);
818 		else /* pte entry */
819 			unmap_mapping_pages(mapping, vmf->pgoff, 1, false);
820 	}
821 
822 	xa_lock_irq(pages);
823 	new_entry = dax_radix_locked_entry(pfn, flags);
824 	if (dax_entry_size(entry) != dax_entry_size(new_entry)) {
825 		dax_disassociate_entry(entry, mapping, false);
826 		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
827 	}
828 
829 	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
830 		/*
831 		 * Only swap our new entry into the radix tree if the current
832 		 * entry is a zero page or an empty entry.  If a normal PTE or
833 		 * PMD entry is already in the tree, we leave it alone.  This
834 		 * means that if we are trying to insert a PTE and the
835 		 * existing entry is a PMD, we will just leave the PMD in the
836 		 * tree and dirty it if necessary.
837 		 */
838 		struct radix_tree_node *node;
839 		void **slot;
840 		void *ret;
841 
842 		ret = __radix_tree_lookup(pages, index, &node, &slot);
843 		WARN_ON_ONCE(ret != entry);
844 		__radix_tree_replace(pages, node, slot,
845 				     new_entry, NULL);
846 		entry = new_entry;
847 	}
848 
849 	if (dirty)
850 		radix_tree_tag_set(pages, index, PAGECACHE_TAG_DIRTY);
851 
852 	xa_unlock_irq(pages);
853 	return entry;
854 }
855 
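/* Convert a file page offset into the user virtual address at which @vma maps it. */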
856 static inline unsigned long
857 pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
858 {
859 	unsigned long address;
860 
861 	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
862 	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
863 	return address;
864 }
865 
866 /* Walk all mappings of a given index of a file and writeprotect them */
867 static void dax_mapping_entry_mkclean(struct address_space *mapping,
868 				      pgoff_t index, unsigned long pfn)
869 {
870 	struct vm_area_struct *vma;
871 	pte_t pte, *ptep = NULL;
872 	pmd_t *pmdp = NULL;
873 	spinlock_t *ptl;
874 
875 	i_mmap_lock_read(mapping);
876 	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
877 		unsigned long address, start, end;
878 
879 		cond_resched();
880 
881 		if (!(vma->vm_flags & VM_SHARED))
882 			continue;
883 
884 		address = pgoff_address(index, vma);
885 
886 		/*
887 		 * Note because we provide start/end to follow_pte_pmd it will
888 		 * call mmu_notifier_invalidate_range_start() on our behalf
889 		 * before taking any lock.
890 		 */
891 		if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
892 			continue;
893 
894 		/*
895 		 * No need to call mmu_notifier_invalidate_range() as we are
896 		 * downgrading page table protection not changing it to point
897 		 * to a new page.
898 		 *
899 		 * See Documentation/vm/mmu_notifier.rst
900 		 */
901 		if (pmdp) {
902 #ifdef CONFIG_FS_DAX_PMD
903 			pmd_t pmd;
904 
905 			if (pfn != pmd_pfn(*pmdp))
906 				goto unlock_pmd;
907 			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
908 				goto unlock_pmd;
909 
910 			flush_cache_range(vma, address,
911 					  address + HPAGE_PMD_SIZE);
912 			pmd = pmdp_invalidate(vma, address, pmdp);
913 			pmd = pmd_wrprotect(pmd);
914 			pmd = pmd_mkclean(pmd);
915 			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
916 unlock_pmd:
917 #endif
918 			spin_unlock(ptl);
919 		} else {
920 			if (pfn != pte_pfn(*ptep))
921 				goto unlock_pte;
922 			if (!pte_dirty(*ptep) && !pte_write(*ptep))
923 				goto unlock_pte;
924 
925 			flush_cache_page(vma, address, pfn);
926 			pte = ptep_clear_flush(vma, address, ptep);
927 			pte = pte_wrprotect(pte);
928 			pte = pte_mkclean(pte);
929 			set_pte_at(vma->vm_mm, address, ptep, pte);
930 unlock_pte:
931 			pte_unmap_unlock(ptep, ptl);
932 		}
933 
934 		mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
935 	}
936 	i_mmap_unlock_read(mapping);
937 }
938 
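/*
 * Flush the data backing a single dirty radix tree entry to the persistent
 * domain: lock the entry, clear the TOWRITE tag, write-protect all user
 * mappings of the range, flush the CPU caches and finally clear the DIRTY
 * tag.
 */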
939 static int dax_writeback_one(struct dax_device *dax_dev,
940 		struct address_space *mapping, pgoff_t index, void *entry)
941 {
942 	struct radix_tree_root *pages = &mapping->i_pages;
943 	void *entry2, **slot;
944 	unsigned long pfn;
945 	long ret = 0;
946 	size_t size;
947 
948 	/*
949 	 * A page got tagged dirty in DAX mapping? Something is seriously
950 	 * wrong.
951 	 */
952 	if (WARN_ON(!radix_tree_exceptional_entry(entry)))
953 		return -EIO;
954 
955 	xa_lock_irq(pages);
956 	entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
957 	/* Entry got punched out / reallocated? */
958 	if (!entry2 || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry2)))
959 		goto put_unlocked;
960 	/*
961 	 * Entry got reallocated elsewhere? No need to writeback. We have to
962 	 * compare pfns as we must not bail out due to difference in lockbit
963 	 * or entry type.
964 	 */
965 	if (dax_radix_pfn(entry2) != dax_radix_pfn(entry))
966 		goto put_unlocked;
967 	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
968 				dax_is_zero_entry(entry))) {
969 		ret = -EIO;
970 		goto put_unlocked;
971 	}
972 
973 	/* Another fsync thread may have already written back this entry */
974 	if (!radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE))
975 		goto put_unlocked;
976 	/* Lock the entry to serialize with page faults */
977 	entry = lock_slot(mapping, slot);
978 	/*
979 	 * We can clear the tag now but we have to be careful so that concurrent
980 	 * dax_writeback_one() calls for the same index cannot finish before we
981 	 * actually flush the caches. This is achieved as the calls will look
982 	 * at the entry only under the i_pages lock and once they do that
983 	 * they will see the entry locked and wait for it to unlock.
984 	 */
985 	radix_tree_tag_clear(pages, index, PAGECACHE_TAG_TOWRITE);
986 	xa_unlock_irq(pages);
987 
988 	/*
989 	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
990 	 * in the middle of a PMD, the 'index' we are given will be aligned to
991 	 * the start index of the PMD, as will the pfn we pull from 'entry'.
992 	 * This allows us to flush for PMD_SIZE and not have to worry about
993 	 * partial PMD writebacks.
994 	 */
995 	pfn = dax_radix_pfn(entry);
996 	size = PAGE_SIZE << dax_radix_order(entry);
997 
998 	dax_mapping_entry_mkclean(mapping, index, pfn);
999 	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), size);
1000 	/*
1001 	 * After we have flushed the cache, we can clear the dirty tag. There
1002 	 * cannot be new dirty data in the pfn after the flush has completed as
1003 	 * the pfn mappings are writeprotected and fault waits for mapping
1004 	 * entry lock.
1005 	 */
1006 	xa_lock_irq(pages);
1007 	radix_tree_tag_clear(pages, index, PAGECACHE_TAG_DIRTY);
1008 	xa_unlock_irq(pages);
1009 	trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
1010 	put_locked_mapping_entry(mapping, index);
1011 	return ret;
1012 
1013  put_unlocked:
1014 	put_unlocked_mapping_entry(mapping, index, entry2);
1015 	xa_unlock_irq(pages);
1016 	return ret;
1017 }
1018 
1019 /*
1020  * Flush the mapping to the persistent domain within the byte range of [start,
1021  * end]. This is required by data integrity operations to ensure file data is
1022  * on persistent storage prior to completion of the operation.
1023  */
1024 int dax_writeback_mapping_range(struct address_space *mapping,
1025 		struct block_device *bdev, struct writeback_control *wbc)
1026 {
1027 	struct inode *inode = mapping->host;
1028 	pgoff_t start_index, end_index;
1029 	pgoff_t indices[PAGEVEC_SIZE];
1030 	struct dax_device *dax_dev;
1031 	struct pagevec pvec;
1032 	bool done = false;
1033 	int i, ret = 0;
1034 
1035 	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
1036 		return -EIO;
1037 
1038 	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
1039 		return 0;
1040 
1041 	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
1042 	if (!dax_dev)
1043 		return -EIO;
1044 
1045 	start_index = wbc->range_start >> PAGE_SHIFT;
1046 	end_index = wbc->range_end >> PAGE_SHIFT;
1047 
1048 	trace_dax_writeback_range(inode, start_index, end_index);
1049 
1050 	tag_pages_for_writeback(mapping, start_index, end_index);
1051 
1052 	pagevec_init(&pvec);
1053 	while (!done) {
1054 		pvec.nr = find_get_entries_tag(mapping, start_index,
1055 				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
1056 				pvec.pages, indices);
1057 
1058 		if (pvec.nr == 0)
1059 			break;
1060 
1061 		for (i = 0; i < pvec.nr; i++) {
1062 			if (indices[i] > end_index) {
1063 				done = true;
1064 				break;
1065 			}
1066 
1067 			ret = dax_writeback_one(dax_dev, mapping, indices[i],
1068 					pvec.pages[i]);
1069 			if (ret < 0) {
1070 				mapping_set_error(mapping, ret);
1071 				goto out;
1072 			}
1073 		}
1074 		start_index = indices[pvec.nr - 1] + 1;
1075 	}
1076 out:
1077 	put_dax(dax_dev);
1078 	trace_dax_writeback_range_done(inode, start_index, end_index);
1079 	return (ret < 0 ? ret : 0);
1080 }
1081 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
1082 
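/* Convert a file position within @iomap into a 512-byte sector on the block device. */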
1083 static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
1084 {
1085 	return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
1086 }
1087 
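/*
 * Look up the pfn backing @pos and check that the returned mapping is big
 * enough and suitably aligned for a mapping of @size (and is a devmap pfn
 * when more than one page is needed).
 */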
1088 static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
1089 			 pfn_t *pfnp)
1090 {
1091 	const sector_t sector = dax_iomap_sector(iomap, pos);
1092 	pgoff_t pgoff;
1093 	int id, rc;
1094 	long length;
1095 
1096 	rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
1097 	if (rc)
1098 		return rc;
1099 	id = dax_read_lock();
1100 	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
1101 				   NULL, pfnp);
1102 	if (length < 0) {
1103 		rc = length;
1104 		goto out;
1105 	}
1106 	rc = -EINVAL;
1107 	if (PFN_PHYS(length) < size)
1108 		goto out;
1109 	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
1110 		goto out;
1111 	/* For larger pages we need devmap */
1112 	if (length > 1 && !pfn_t_devmap(*pfnp))
1113 		goto out;
1114 	rc = 0;
1115 out:
1116 	dax_read_unlock(id);
1117 	return rc;
1118 }
1119 
1120 /*
1121  * The user has performed a load from a hole in the file.  Allocating a new
1122  * page in the file would cause excessive storage usage for workloads with
1123  * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
1124  * If this page is ever written to we will re-fault and change the mapping to
1125  * point to real DAX storage instead.
1126  */
1127 static vm_fault_t dax_load_hole(struct address_space *mapping, void *entry,
1128 			 struct vm_fault *vmf)
1129 {
1130 	struct inode *inode = mapping->host;
1131 	unsigned long vaddr = vmf->address;
1132 	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
1133 	vm_fault_t ret;
1134 
1135 	dax_insert_mapping_entry(mapping, vmf, entry, pfn, RADIX_DAX_ZERO_PAGE,
1136 			false);
1137 	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
1138 	trace_dax_load_hole(inode, vmf, ret);
1139 	return ret;
1140 }
1141 
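/*
 * Return true if @offset and @length are both aligned to the logical block
 * size of @bdev, so the range can be zeroed with blkdev_issue_zeroout()
 * instead of a CPU memset.
 */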
1142 static bool dax_range_is_aligned(struct block_device *bdev,
1143 				 unsigned int offset, unsigned int length)
1144 {
1145 	unsigned short sector_size = bdev_logical_block_size(bdev);
1146 
1147 	if (!IS_ALIGNED(offset, sector_size))
1148 		return false;
1149 	if (!IS_ALIGNED(length, sector_size))
1150 		return false;
1151 
1152 	return true;
1153 }
1154 
1155 int __dax_zero_page_range(struct block_device *bdev,
1156 		struct dax_device *dax_dev, sector_t sector,
1157 		unsigned int offset, unsigned int size)
1158 {
1159 	if (dax_range_is_aligned(bdev, offset, size)) {
1160 		sector_t start_sector = sector + (offset >> 9);
1161 
1162 		return blkdev_issue_zeroout(bdev, start_sector,
1163 				size >> 9, GFP_NOFS, 0);
1164 	} else {
1165 		pgoff_t pgoff;
1166 		long rc, id;
1167 		void *kaddr;
1168 
1169 		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
1170 		if (rc)
1171 			return rc;
1172 
1173 		id = dax_read_lock();
1174 		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
1175 		if (rc < 0) {
1176 			dax_read_unlock(id);
1177 			return rc;
1178 		}
1179 		memset(kaddr + offset, 0, size);
1180 		dax_flush(dax_dev, kaddr + offset, size);
1181 		dax_read_unlock(id);
1182 	}
1183 	return 0;
1184 }
1185 EXPORT_SYMBOL_GPL(__dax_zero_page_range);
1186 
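/*
 * The iomap actor for dax_iomap_rw(): copy data between the iov_iter and
 * the directly mapped persistent memory for one iomap extent, zeroing
 * reads from holes and unwritten extents.
 */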
1187 static loff_t
1188 dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
1189 		struct iomap *iomap)
1190 {
1191 	struct block_device *bdev = iomap->bdev;
1192 	struct dax_device *dax_dev = iomap->dax_dev;
1193 	struct iov_iter *iter = data;
1194 	loff_t end = pos + length, done = 0;
1195 	ssize_t ret = 0;
1196 	size_t xfer;
1197 	int id;
1198 
1199 	if (iov_iter_rw(iter) == READ) {
1200 		end = min(end, i_size_read(inode));
1201 		if (pos >= end)
1202 			return 0;
1203 
1204 		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
1205 			return iov_iter_zero(min(length, end - pos), iter);
1206 	}
1207 
1208 	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
1209 		return -EIO;
1210 
1211 	/*
1212 	 * Write can allocate block for an area which has a hole page mapped
1213 	 * into page tables. We have to tear down these mappings so that data
1214 	 * written by write(2) is visible in mmap.
1215 	 */
1216 	if (iomap->flags & IOMAP_F_NEW) {
1217 		invalidate_inode_pages2_range(inode->i_mapping,
1218 					      pos >> PAGE_SHIFT,
1219 					      (end - 1) >> PAGE_SHIFT);
1220 	}
1221 
1222 	id = dax_read_lock();
1223 	while (pos < end) {
1224 		unsigned offset = pos & (PAGE_SIZE - 1);
1225 		const size_t size = ALIGN(length + offset, PAGE_SIZE);
1226 		const sector_t sector = dax_iomap_sector(iomap, pos);
1227 		ssize_t map_len;
1228 		pgoff_t pgoff;
1229 		void *kaddr;
1230 
1231 		if (fatal_signal_pending(current)) {
1232 			ret = -EINTR;
1233 			break;
1234 		}
1235 
1236 		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
1237 		if (ret)
1238 			break;
1239 
1240 		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
1241 				&kaddr, NULL);
1242 		if (map_len < 0) {
1243 			ret = map_len;
1244 			break;
1245 		}
1246 
1247 		map_len = PFN_PHYS(map_len);
1248 		kaddr += offset;
1249 		map_len -= offset;
1250 		if (map_len > end - pos)
1251 			map_len = end - pos;
1252 
1253 		/*
1254 		 * The userspace address for the memory copy has already been
1255 		 * validated via access_ok() in either vfs_read() or
1256 		 * vfs_write(), depending on which operation we are doing.
1257 		 */
1258 		if (iov_iter_rw(iter) == WRITE)
1259 			xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
1260 					map_len, iter);
1261 		else
1262 			xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
1263 					map_len, iter);
1264 
1265 		pos += xfer;
1266 		length -= xfer;
1267 		done += xfer;
1268 
1269 		if (xfer == 0)
1270 			ret = -EFAULT;
1271 		if (xfer < map_len)
1272 			break;
1273 	}
1274 	dax_read_unlock(id);
1275 
1276 	return done ? done : ret;
1277 }
1278 
1279 /**
1280  * dax_iomap_rw - Perform I/O to a DAX file
1281  * @iocb:	The control block for this I/O
1282  * @iter:	The addresses to do I/O from or to
1283  * @ops:	iomap ops passed from the file system
1284  *
1285  * This function performs read and write operations to directly mapped
1286  * persistent memory.  The caller needs to take care of read/write exclusion
1287  * and evicting any page cache pages in the region under I/O.
1288  */
1289 ssize_t
1290 dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
1291 		const struct iomap_ops *ops)
1292 {
1293 	struct address_space *mapping = iocb->ki_filp->f_mapping;
1294 	struct inode *inode = mapping->host;
1295 	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
1296 	unsigned flags = 0;
1297 
1298 	if (iov_iter_rw(iter) == WRITE) {
1299 		lockdep_assert_held_exclusive(&inode->i_rwsem);
1300 		flags |= IOMAP_WRITE;
1301 	} else {
1302 		lockdep_assert_held(&inode->i_rwsem);
1303 	}
1304 
1305 	if (iocb->ki_flags & IOCB_NOWAIT)
1306 		flags |= IOMAP_NOWAIT;
1307 
1308 	while (iov_iter_count(iter)) {
1309 		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
1310 				iter, dax_iomap_actor);
1311 		if (ret <= 0)
1312 			break;
1313 		pos += ret;
1314 		done += ret;
1315 	}
1316 
1317 	iocb->ki_pos += done;
1318 	return done ? done : ret;
1319 }
1320 EXPORT_SYMBOL_GPL(dax_iomap_rw);
1321 
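/* Convert an errno from the fault path into a VM_FAULT_* return code. */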
1322 static vm_fault_t dax_fault_return(int error)
1323 {
1324 	if (error == 0)
1325 		return VM_FAULT_NOPAGE;
1326 	if (error == -ENOMEM)
1327 		return VM_FAULT_OOM;
1328 	return VM_FAULT_SIGBUS;
1329 }
1330 
1331 /*
1332  * MAP_SYNC on a dax mapping guarantees dirty metadata is
1333  * flushed on write-faults (non-cow), but not read-faults.
1334  */
1335 static bool dax_fault_is_synchronous(unsigned long flags,
1336 		struct vm_area_struct *vma, struct iomap *iomap)
1337 {
1338 	return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
1339 		&& (iomap->flags & IOMAP_F_DIRTY);
1340 }
1341 
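/*
 * Handle a PTE-sized fault on a DAX file: map the faulting offset via
 * ->iomap_begin(), install a PTE (or a read-only zero page for read faults
 * on holes), and for synchronous (MAP_SYNC) faults defer the PTE insertion
 * to the caller via @pfnp until fsync has been done.
 */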
1342 static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
1343 			       int *iomap_errp, const struct iomap_ops *ops)
1344 {
1345 	struct vm_area_struct *vma = vmf->vma;
1346 	struct address_space *mapping = vma->vm_file->f_mapping;
1347 	struct inode *inode = mapping->host;
1348 	unsigned long vaddr = vmf->address;
1349 	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
1350 	struct iomap iomap = { 0 };
1351 	unsigned flags = IOMAP_FAULT;
1352 	int error, major = 0;
1353 	bool write = vmf->flags & FAULT_FLAG_WRITE;
1354 	bool sync;
1355 	vm_fault_t ret = 0;
1356 	void *entry;
1357 	pfn_t pfn;
1358 
1359 	trace_dax_pte_fault(inode, vmf, ret);
1360 	/*
1361 	 * Check whether the offset is beyond the end of the file. The caller is
1362 	 * supposed to hold locks serializing us with truncate / punch hole, so
1363 	 * this is a reliable test.
1364 	 */
1365 	if (pos >= i_size_read(inode)) {
1366 		ret = VM_FAULT_SIGBUS;
1367 		goto out;
1368 	}
1369 
1370 	if (write && !vmf->cow_page)
1371 		flags |= IOMAP_WRITE;
1372 
1373 	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
1374 	if (IS_ERR(entry)) {
1375 		ret = dax_fault_return(PTR_ERR(entry));
1376 		goto out;
1377 	}
1378 
1379 	/*
1380 	 * It is possible, particularly with mixed reads & writes to private
1381 	 * mappings, that we have raced with a PMD fault that overlaps with
1382 	 * the PTE we need to set up.  If so just return and the fault will be
1383 	 * retried.
1384 	 */
1385 	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
1386 		ret = VM_FAULT_NOPAGE;
1387 		goto unlock_entry;
1388 	}
1389 
1390 	/*
1391 	 * Note that we don't bother to use iomap_apply here: DAX requires
1392 	 * the file system block size to be equal to the page size, which means
1393 	 * that we never have to deal with more than a single extent here.
1394 	 */
1395 	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
1396 	if (iomap_errp)
1397 		*iomap_errp = error;
1398 	if (error) {
1399 		ret = dax_fault_return(error);
1400 		goto unlock_entry;
1401 	}
1402 	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
1403 		error = -EIO;	/* fs corruption? */
1404 		goto error_finish_iomap;
1405 	}
1406 
1407 	if (vmf->cow_page) {
1408 		sector_t sector = dax_iomap_sector(&iomap, pos);
1409 
1410 		switch (iomap.type) {
1411 		case IOMAP_HOLE:
1412 		case IOMAP_UNWRITTEN:
1413 			clear_user_highpage(vmf->cow_page, vaddr);
1414 			break;
1415 		case IOMAP_MAPPED:
1416 			error = copy_user_dax(iomap.bdev, iomap.dax_dev,
1417 					sector, PAGE_SIZE, vmf->cow_page, vaddr);
1418 			break;
1419 		default:
1420 			WARN_ON_ONCE(1);
1421 			error = -EIO;
1422 			break;
1423 		}
1424 
1425 		if (error)
1426 			goto error_finish_iomap;
1427 
1428 		__SetPageUptodate(vmf->cow_page);
1429 		ret = finish_fault(vmf);
1430 		if (!ret)
1431 			ret = VM_FAULT_DONE_COW;
1432 		goto finish_iomap;
1433 	}
1434 
1435 	sync = dax_fault_is_synchronous(flags, vma, &iomap);
1436 
1437 	switch (iomap.type) {
1438 	case IOMAP_MAPPED:
1439 		if (iomap.flags & IOMAP_F_NEW) {
1440 			count_vm_event(PGMAJFAULT);
1441 			count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
1442 			major = VM_FAULT_MAJOR;
1443 		}
1444 		error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
1445 		if (error < 0)
1446 			goto error_finish_iomap;
1447 
1448 		entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
1449 						 0, write && !sync);
1450 
1451 		/*
1452 		 * If we are doing synchronous page fault and inode needs fsync,
1453 		 * we can insert PTE into page tables only after that happens.
1454 		 * Skip insertion for now and return the pfn so that caller can
1455 		 * insert it after fsync is done.
1456 		 */
1457 		if (sync) {
1458 			if (WARN_ON_ONCE(!pfnp)) {
1459 				error = -EIO;
1460 				goto error_finish_iomap;
1461 			}
1462 			*pfnp = pfn;
1463 			ret = VM_FAULT_NEEDDSYNC | major;
1464 			goto finish_iomap;
1465 		}
1466 		trace_dax_insert_mapping(inode, vmf, entry);
1467 		if (write)
1468 			ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
1469 		else
1470 			ret = vmf_insert_mixed(vma, vaddr, pfn);
1471 
1472 		goto finish_iomap;
1473 	case IOMAP_UNWRITTEN:
1474 	case IOMAP_HOLE:
1475 		if (!write) {
1476 			ret = dax_load_hole(mapping, entry, vmf);
1477 			goto finish_iomap;
1478 		}
1479 		/*FALLTHRU*/
1480 	default:
1481 		WARN_ON_ONCE(1);
1482 		error = -EIO;
1483 		break;
1484 	}
1485 
1486  error_finish_iomap:
1487 	ret = dax_fault_return(error);
1488  finish_iomap:
1489 	if (ops->iomap_end) {
1490 		int copied = PAGE_SIZE;
1491 
1492 		if (ret & VM_FAULT_ERROR)
1493 			copied = 0;
1494 		/*
1495 		 * The fault is done by now and there's no way back (other
1496 		 * thread may be already happily using PTE we have installed).
1497 		 * Just ignore error from ->iomap_end since we cannot do much
1498 		 * with it.
1499 		 */
1500 		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
1501 	}
1502  unlock_entry:
1503 	put_locked_mapping_entry(mapping, vmf->pgoff);
1504  out:
1505 	trace_dax_pte_fault_done(inode, vmf, ret);
1506 	return ret | major;
1507 }
1508 
1509 #ifdef CONFIG_FS_DAX_PMD
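/*
 * Install a PMD-sized mapping of the huge zero page for a read fault on a
 * hole, falling back to PTEs if the zero page or the PMD slot is not
 * available.
 */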
1510 static vm_fault_t dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
1511 		void *entry)
1512 {
1513 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1514 	unsigned long pmd_addr = vmf->address & PMD_MASK;
1515 	struct inode *inode = mapping->host;
1516 	struct page *zero_page;
1517 	void *ret = NULL;
1518 	spinlock_t *ptl;
1519 	pmd_t pmd_entry;
1520 	pfn_t pfn;
1521 
1522 	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
1523 
1524 	if (unlikely(!zero_page))
1525 		goto fallback;
1526 
1527 	pfn = page_to_pfn_t(zero_page);
1528 	ret = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
1529 			RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE, false);
1530 
1531 	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1532 	if (!pmd_none(*(vmf->pmd))) {
1533 		spin_unlock(ptl);
1534 		goto fallback;
1535 	}
1536 
1537 	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
1538 	pmd_entry = pmd_mkhuge(pmd_entry);
1539 	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
1540 	spin_unlock(ptl);
1541 	trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
1542 	return VM_FAULT_NOPAGE;
1543 
1544 fallback:
1545 	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
1546 	return VM_FAULT_FALLBACK;
1547 }
1548 
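/*
 * Handle a PMD-sized fault: check alignment and file size, grab a PMD
 * entry in the radix tree and install a huge mapping, falling back to PTE
 * faults whenever a 2MiB mapping is not possible.
 */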
1549 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1550 			       const struct iomap_ops *ops)
1551 {
1552 	struct vm_area_struct *vma = vmf->vma;
1553 	struct address_space *mapping = vma->vm_file->f_mapping;
1554 	unsigned long pmd_addr = vmf->address & PMD_MASK;
1555 	bool write = vmf->flags & FAULT_FLAG_WRITE;
1556 	bool sync;
1557 	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
1558 	struct inode *inode = mapping->host;
1559 	vm_fault_t result = VM_FAULT_FALLBACK;
1560 	struct iomap iomap = { 0 };
1561 	pgoff_t max_pgoff, pgoff;
1562 	void *entry;
1563 	loff_t pos;
1564 	int error;
1565 	pfn_t pfn;
1566 
1567 	/*
1568 	 * Check whether the offset is beyond the end of the file. The caller is
1569 	 * supposed to hold locks serializing us with truncate / punch hole, so
1570 	 * this is a reliable test.
1571 	 */
1572 	pgoff = linear_page_index(vma, pmd_addr);
1573 	max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1574 
1575 	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
1576 
1577 	/*
1578 	 * Make sure that the faulting address's PMD offset (color) matches
1579 	 * the PMD offset from the start of the file.  This is necessary so
1580 	 * that a PMD range in the page table overlaps exactly with a PMD
1581 	 * range in the radix tree.
1582 	 */
1583 	if ((vmf->pgoff & PG_PMD_COLOUR) !=
1584 	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
1585 		goto fallback;
1586 
1587 	/* Fall back to PTEs if we're going to COW */
1588 	if (write && !(vma->vm_flags & VM_SHARED))
1589 		goto fallback;
1590 
1591 	/* If the PMD would extend outside the VMA */
1592 	if (pmd_addr < vma->vm_start)
1593 		goto fallback;
1594 	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
1595 		goto fallback;
1596 
1597 	if (pgoff >= max_pgoff) {
1598 		result = VM_FAULT_SIGBUS;
1599 		goto out;
1600 	}
1601 
1602 	/* If the PMD would extend beyond the file size */
1603 	if ((pgoff | PG_PMD_COLOUR) >= max_pgoff)
1604 		goto fallback;
1605 
1606 	/*
1607 	 * grab_mapping_entry() will make sure we get a 2MiB empty entry, a
1608 	 * 2MiB zero page entry or a DAX PMD.  If it can't (because a 4k page
1609 	 * is already in the tree, for instance), it will return -EEXIST and
1610 	 * we just fall back to 4k entries.
1611 	 */
1612 	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
1613 	if (IS_ERR(entry))
1614 		goto fallback;
1615 
1616 	/*
1617 	 * It is possible, particularly with mixed reads & writes to private
1618 	 * mappings, that we have raced with a PTE fault that overlaps with
1619 	 * the PMD we need to set up.  If so just return and the fault will be
1620 	 * retried.
1621 	 */
1622 	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
1623 			!pmd_devmap(*vmf->pmd)) {
1624 		result = 0;
1625 		goto unlock_entry;
1626 	}
1627 
1628 	/*
1629 	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
1630 	 * setting up a mapping, so really we're using iomap_begin() as a way
1631 	 * to look up our filesystem block.
1632 	 */
1633 	pos = (loff_t)pgoff << PAGE_SHIFT;
1634 	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
1635 	if (error)
1636 		goto unlock_entry;
1637 
1638 	if (iomap.offset + iomap.length < pos + PMD_SIZE)
1639 		goto finish_iomap;
1640 
1641 	sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);
1642 
1643 	switch (iomap.type) {
1644 	case IOMAP_MAPPED:
1645 		error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
1646 		if (error < 0)
1647 			goto finish_iomap;
1648 
1649 		entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
1650 						RADIX_DAX_PMD, write && !sync);
1651 
1652 		/*
1653 		 * If we are doing synchronous page fault and inode needs fsync,
1654 		 * we can insert PMD into page tables only after that happens.
1655 		 * Skip insertion for now and return the pfn so that caller can
1656 		 * insert it after fsync is done.
1657 		 */
1658 		if (sync) {
1659 			if (WARN_ON_ONCE(!pfnp))
1660 				goto finish_iomap;
1661 			*pfnp = pfn;
1662 			result = VM_FAULT_NEEDDSYNC;
1663 			goto finish_iomap;
1664 		}
1665 
1666 		trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
1667 		result = vmf_insert_pfn_pmd(vmf, pfn, write);
1668 		break;
1669 	case IOMAP_UNWRITTEN:
1670 	case IOMAP_HOLE:
1671 		if (WARN_ON_ONCE(write))
1672 			break;
1673 		result = dax_pmd_load_hole(vmf, &iomap, entry);
1674 		break;
1675 	default:
1676 		WARN_ON_ONCE(1);
1677 		break;
1678 	}
1679 
1680  finish_iomap:
1681 	if (ops->iomap_end) {
1682 		int copied = PMD_SIZE;
1683 
1684 		if (result == VM_FAULT_FALLBACK)
1685 			copied = 0;
1686 		/*
1687 		 * The fault is done by now and there's no way back (other
1688 		 * thread may be already happily using PMD we have installed).
1689 		 * Just ignore error from ->iomap_end since we cannot do much
1690 		 * with it.
1691 		 */
1692 		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
1693 				&iomap);
1694 	}
1695  unlock_entry:
1696 	put_locked_mapping_entry(mapping, pgoff);
1697  fallback:
1698 	if (result == VM_FAULT_FALLBACK) {
1699 		split_huge_pmd(vma, vmf->pmd, vmf->address);
1700 		count_vm_event(THP_FAULT_FALLBACK);
1701 	}
1702 out:
1703 	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
1704 	return result;
1705 }
1706 #else
1707 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1708 			       const struct iomap_ops *ops)
1709 {
1710 	return VM_FAULT_FALLBACK;
1711 }
1712 #endif /* CONFIG_FS_DAX_PMD */
1713 
1714 /**
1715  * dax_iomap_fault - handle a page fault on a DAX file
1716  * @vmf: The description of the fault
1717  * @pe_size: Size of the page to fault in
1718  * @pfnp: PFN to insert for synchronous faults if fsync is required
1719  * @iomap_errp: Storage for detailed error code in case of error
1720  * @ops: Iomap ops passed from the file system
1721  *
1722  * When a page fault occurs, filesystems may call this helper in
1723  * their fault handler for DAX files. dax_iomap_fault() assumes the caller
1724  * has done all the necessary locking for page fault to proceed
1725  * successfully.
1726  */
1727 vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
1728 		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
1729 {
1730 	switch (pe_size) {
1731 	case PE_SIZE_PTE:
1732 		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
1733 	case PE_SIZE_PMD:
1734 		return dax_iomap_pmd_fault(vmf, pfnp, ops);
1735 	default:
1736 		return VM_FAULT_FALLBACK;
1737 	}
1738 }
1739 EXPORT_SYMBOL_GPL(dax_iomap_fault);
1740 
1741 /**
1742  * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
1743  * @vmf: The description of the fault
1744  * @pe_size: Size of entry to be inserted
1745  * @pfn: PFN to insert
1746  *
1747  * This function inserts a writeable PTE or PMD entry into the page tables
1748  * for an mmaped DAX file.  It also takes care of marking the corresponding
1749  * radix tree entry as dirty.
1750  */
1751 static vm_fault_t dax_insert_pfn_mkwrite(struct vm_fault *vmf,
1752 				  enum page_entry_size pe_size,
1753 				  pfn_t pfn)
1754 {
1755 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1756 	void *entry, **slot;
1757 	pgoff_t index = vmf->pgoff;
1758 	vm_fault_t ret;
1759 
1760 	xa_lock_irq(&mapping->i_pages);
1761 	entry = get_unlocked_mapping_entry(mapping, index, &slot);
1762 	/* Did we race with someone splitting entry or so? */
1763 	if (!entry ||
1764 	    (pe_size == PE_SIZE_PTE && !dax_is_pte_entry(entry)) ||
1765 	    (pe_size == PE_SIZE_PMD && !dax_is_pmd_entry(entry))) {
1766 		put_unlocked_mapping_entry(mapping, index, entry);
1767 		xa_unlock_irq(&mapping->i_pages);
1768 		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
1769 						      VM_FAULT_NOPAGE);
1770 		return VM_FAULT_NOPAGE;
1771 	}
1772 	radix_tree_tag_set(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY);
1773 	entry = lock_slot(mapping, slot);
1774 	xa_unlock_irq(&mapping->i_pages);
1775 	switch (pe_size) {
1776 	case PE_SIZE_PTE:
1777 		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
1778 		break;
1779 #ifdef CONFIG_FS_DAX_PMD
1780 	case PE_SIZE_PMD:
1781 		ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
1782 		break;
1783 #endif
1784 	default:
1785 		ret = VM_FAULT_FALLBACK;
1786 	}
1787 	put_locked_mapping_entry(mapping, index);
1788 	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
1789 	return ret;
1790 }
1791 
1792 /**
1793  * dax_finish_sync_fault - finish synchronous page fault
1794  * @vmf: The description of the fault
1795  * @pe_size: Size of entry to be inserted
1796  * @pfn: PFN to insert
1797  *
1798  * This function ensures that the file range touched by the page fault is
1799  * stored persistently on the media and then inserts the appropriate page
1800  * table entry.
1801  */
1802 vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
1803 		enum page_entry_size pe_size, pfn_t pfn)
1804 {
1805 	int err;
1806 	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
1807 	size_t len = 0;
1808 
1809 	if (pe_size == PE_SIZE_PTE)
1810 		len = PAGE_SIZE;
1811 	else if (pe_size == PE_SIZE_PMD)
1812 		len = PMD_SIZE;
1813 	else
1814 		WARN_ON_ONCE(1);
1815 	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
1816 	if (err)
1817 		return VM_FAULT_SIGBUS;
1818 	return dax_insert_pfn_mkwrite(vmf, pe_size, pfn);
1819 }
1820 EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
1821