/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/dax.h>
#include <linux/sched/signal.h>

#include "internal.h"

/*
 * Execute an iomap write on a segment of the mapping that spans a
 * contiguous range of pages that have identical block mapping state.
 *
 * This avoids the need to map pages individually, do individual allocations
 * for each page and, most importantly, avoids the need for filesystem
 * specific locking per page. Instead, all the operations are amortised over
 * the entire range of pages. It is assumed that the filesystems will lock
 * whatever resources they require in the iomap_begin call, and release them
 * in the iomap_end call.
 */
loff_t
iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
		const struct iomap_ops *ops, void *data, iomap_actor_t actor)
{
	struct iomap iomap = { 0 };
	loff_t written = 0, ret;

	/*
	 * Need to map a range from start position for length bytes. This can
	 * span multiple pages - it is only guaranteed to return a range of a
	 * single type of pages (e.g. all into a hole, all mapped or all
	 * unwritten). Failure at this point has nothing to undo.
	 *
	 * If allocation is required for this range, reserve the space now so
	 * that the allocation is guaranteed to succeed later on. Once we copy
	 * the data into the page cache pages, then we cannot fail otherwise we
	 * expose transient stale data. If the reserve fails, we can safely
	 * back out at this point as there is nothing to undo.
	 */
	ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
	if (ret)
		return ret;
	if (WARN_ON(iomap.offset > pos))
		return -EIO;
	if (WARN_ON(iomap.length == 0))
		return -EIO;

	/*
	 * Cut down the length to the one actually provided by the filesystem,
	 * as it might not be able to give us the whole size that we requested.
	 */
	if (iomap.offset + iomap.length < pos + length)
		length = iomap.offset + iomap.length - pos;

	/*
	 * Now that we have guaranteed that the space allocation will succeed,
	 * we can do the copy-in page by page without having to worry about
	 * failures exposing transient data.
	 */
	written = actor(inode, pos, length, data, &iomap);

	/*
	 * Now the data has been copied, commit the range we've copied. This
	 * should not fail unless the filesystem has had a fatal error.
	 */
	if (ops->iomap_end) {
		ret = ops->iomap_end(inode, pos, length,
				     written > 0 ? written : 0,
				     flags, &iomap);
	}

	return written ? written : ret;
}
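
/*
 * Illustrative sketch (not compiled): a caller pairs iomap_apply() with a
 * filesystem's iomap_ops and a private actor, looping until the range is
 * consumed. The names myfs_iomap_ops, myfs_count_actor and
 * myfs_count_mapped below are hypothetical.
 */
#if 0
static loff_t
myfs_count_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	loff_t *bytes = data;

	if (iomap->type == IOMAP_MAPPED)
		*bytes += length;
	return length;	/* consume the whole extent to keep iterating */
}

static loff_t myfs_count_mapped(struct inode *inode, loff_t pos, loff_t len)
{
	loff_t bytes = 0, ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_REPORT,
				&myfs_iomap_ops, &bytes, myfs_count_actor);
		if (ret <= 0)
			break;
		pos += ret;
		len -= ret;
	}
	return bytes;
}
#endif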

static sector_t
iomap_sector(struct iomap *iomap, loff_t pos)
{
	return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
}
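
/*
 * Worked example (illustrative numbers): with iomap->addr = 1 MiB,
 * iomap->offset = 64 KiB and pos = 68 KiB, the byte address is
 * 1 MiB + 4 KiB, so the returned sector is
 * (1048576 + 4096) >> 9 = 2056 in 512-byte units.
 */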

static struct iomap_page *
iomap_page_create(struct inode *inode, struct page *page)
{
	struct iomap_page *iop = to_iomap_page(page);

	if (iop || i_blocksize(inode) == PAGE_SIZE)
		return iop;

	iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
	atomic_set(&iop->read_count, 0);
	atomic_set(&iop->write_count, 0);
	spin_lock_init(&iop->uptodate_lock);
	bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);

	/*
	 * migrate_page_move_mapping() assumes that pages with private data
	 * have their count elevated by 1.
	 */
	get_page(page);
	set_page_private(page, (unsigned long)iop);
	SetPagePrivate(page);
	return iop;
}

static void
iomap_page_release(struct page *page)
{
	struct iomap_page *iop = to_iomap_page(page);

	if (!iop)
		return;
	WARN_ON_ONCE(atomic_read(&iop->read_count));
	WARN_ON_ONCE(atomic_read(&iop->write_count));
	ClearPagePrivate(page);
	set_page_private(page, 0);
	put_page(page);
	kfree(iop);
}
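
/*
 * For reference, a sketch of the lookup helper assumed above (the real
 * accessor lives in include/linux/iomap.h): to_iomap_page() simply reads
 * page_private() when PagePrivate is set.
 */
#if 0
static inline struct iomap_page *to_iomap_page(struct page *page)
{
	if (page_has_private(page))
		return (struct iomap_page *)page_private(page);
	return NULL;
}
#endif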

/*
 * Calculate the range inside the page that we actually need to read.
 */
static void
iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
		loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
{
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	unsigned poff = offset_in_page(*pos);
	unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (iop) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!test_bit(i, iop->uptodate))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		for ( ; i <= last; i++) {
			if (test_bit(i, iop->uptodate)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + length > isize) {
		unsigned end = offset_in_page(isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= (last - end) * block_size;
	}

	*offp = poff;
	*lenp = plen;
}
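
/*
 * Worked example (1k blocks in a 4k page): for a read of the whole page
 * where block 0 is already uptodate, the first loop advances *pos and poff
 * by 1k and shrinks plen to 3k; if block 3 is uptodate as well, the second
 * loop trims plen to 2k so that only blocks 1 and 2 are actually read.
 */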

static void
iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned first = off >> inode->i_blkbits;
	unsigned last = (off + len - 1) >> inode->i_blkbits;
	bool uptodate = true;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&iop->uptodate_lock, flags);
	for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
		if (i >= first && i <= last)
			set_bit(i, iop->uptodate);
		else if (!test_bit(i, iop->uptodate))
			uptodate = false;
	}

	if (uptodate)
		SetPageUptodate(page);
	spin_unlock_irqrestore(&iop->uptodate_lock, flags);
}

static void
iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
{
	if (PageError(page))
		return;

	if (page_has_private(page))
		iomap_iop_set_range_uptodate(page, off, len);
	else
		SetPageUptodate(page);
}

static void
iomap_read_finish(struct iomap_page *iop, struct page *page)
{
	if (!iop || atomic_dec_and_test(&iop->read_count))
		unlock_page(page);
}

static void
iomap_read_page_end_io(struct bio_vec *bvec, int error)
{
	struct page *page = bvec->bv_page;
	struct iomap_page *iop = to_iomap_page(page);

	if (unlikely(error)) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
	}

	iomap_read_finish(iop, page);
}

static void
iomap_read_inline_data(struct inode *inode, struct page *page,
		struct iomap *iomap)
{
	size_t size = i_size_read(inode);
	void *addr;

	if (PageUptodate(page))
		return;

	BUG_ON(page->index);
	BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));

	addr = kmap_atomic(page);
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - size);
	kunmap_atomic(addr);
	SetPageUptodate(page);
}

static void
iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i)
		iomap_read_page_end_io(bvec, error);
	bio_put(bio);
}

struct iomap_readpage_ctx {
	struct page		*cur_page;
	bool			cur_page_in_bio;
	bool			is_readahead;
	struct bio		*bio;
	struct list_head	*pages;
};

static loff_t
iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iomap_readpage_ctx *ctx = data;
	struct page *page = ctx->cur_page;
	struct iomap_page *iop = iomap_page_create(inode, page);
	bool is_contig = false;
	loff_t orig_pos = pos;
	unsigned poff, plen;
	sector_t sector;

	if (iomap->type == IOMAP_INLINE) {
		WARN_ON_ONCE(pos);
		iomap_read_inline_data(inode, page, iomap);
		return PAGE_SIZE;
	}

	/* zero post-eof blocks as the page may be mapped */
	iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
	if (plen == 0)
		goto done;

	if (iomap->type != IOMAP_MAPPED || pos >= i_size_read(inode)) {
		zero_user(page, poff, plen);
		iomap_set_range_uptodate(page, poff, plen);
		goto done;
	}

	ctx->cur_page_in_bio = true;

	/*
	 * Try to merge into a previous segment if we can.
	 */
	sector = iomap_sector(iomap, pos);
	if (ctx->bio && bio_end_sector(ctx->bio) == sector) {
		if (__bio_try_merge_page(ctx->bio, page, plen, poff))
			goto done;
		is_contig = true;
	}

	/*
	 * If we start a new segment we need to increase the read count, and we
	 * need to do so before submitting any previous full bio to make sure
	 * that we don't prematurely unlock the page.
	 */
	if (iop)
		atomic_inc(&iop->read_count);

	if (!ctx->bio || !is_contig || bio_full(ctx->bio)) {
		gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
		int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->is_readahead) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
		ctx->bio->bi_opf = REQ_OP_READ;
		if (ctx->is_readahead)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		bio_set_dev(ctx->bio, iomap->bdev);
		ctx->bio->bi_end_io = iomap_read_end_io;
	}

	__bio_add_page(ctx->bio, page, plen, poff);
done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	return pos - orig_pos + plen;
}

int
iomap_readpage(struct page *page, const struct iomap_ops *ops)
{
	struct iomap_readpage_ctx ctx = { .cur_page = page };
	struct inode *inode = page->mapping->host;
	unsigned poff;
	loff_t ret;

	for (poff = 0; poff < PAGE_SIZE; poff += ret) {
		ret = iomap_apply(inode, page_offset(page) + poff,
				PAGE_SIZE - poff, 0, ops, &ctx,
				iomap_readpage_actor);
		if (ret <= 0) {
			WARN_ON_ONCE(ret == 0);
			SetPageError(page);
			break;
		}
	}

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_page_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_page_in_bio);
		unlock_page(page);
	}

	/*
	 * Just like mpage_readpages and block_read_full_page we always
	 * return 0 and just mark the page as PageError on errors. This
	 * should be cleaned up all through the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_readpage);
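
/*
 * Illustrative sketch (not compiled): a filesystem typically wires this up
 * as its ->readpage address_space operation. The names below are
 * hypothetical.
 */
#if 0
static int myfs_readpage(struct file *unused, struct page *page)
{
	return iomap_readpage(page, &myfs_iomap_ops);
}
#endif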

static struct page *
iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos,
		loff_t length, loff_t *done)
{
	while (!list_empty(pages)) {
		struct page *page = lru_to_page(pages);

		if (page_offset(page) >= (u64)pos + length)
			break;

		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, inode->i_mapping, page->index,
				GFP_NOFS))
			return page;

		/*
		 * If we already have a page in the page cache at index we are
		 * done. Upper layers don't care if it is uptodate after the
		 * readpages call itself as every page gets checked again once
		 * actually needed.
		 */
		*done += PAGE_SIZE;
		put_page(page);
	}

	return NULL;
}

static loff_t
iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_readpage_ctx *ctx = data;
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		if (ctx->cur_page && offset_in_page(pos + done) == 0) {
			if (!ctx->cur_page_in_bio)
				unlock_page(ctx->cur_page);
			put_page(ctx->cur_page);
			ctx->cur_page = NULL;
		}
		if (!ctx->cur_page) {
			ctx->cur_page = iomap_next_page(inode, ctx->pages,
					pos, length, &done);
			if (!ctx->cur_page)
				break;
			ctx->cur_page_in_bio = false;
		}
		ret = iomap_readpage_actor(inode, pos + done, length - done,
				ctx, iomap);
	}

	return done;
}

int
iomap_readpages(struct address_space *mapping, struct list_head *pages,
		unsigned nr_pages, const struct iomap_ops *ops)
{
	struct iomap_readpage_ctx ctx = {
		.pages		= pages,
		.is_readahead	= true,
	};
	loff_t pos = page_offset(list_entry(pages->prev, struct page, lru));
	loff_t last = page_offset(list_entry(pages->next, struct page, lru));
	loff_t length = last - pos + PAGE_SIZE, ret = 0;

	while (length > 0) {
		ret = iomap_apply(mapping->host, pos, length, 0, ops,
				&ctx, iomap_readpages_actor);
		if (ret <= 0) {
			WARN_ON_ONCE(ret == 0);
			goto done;
		}
		pos += ret;
		length -= ret;
	}
	ret = 0;
done:
	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_page) {
		if (!ctx.cur_page_in_bio)
			unlock_page(ctx.cur_page);
		put_page(ctx.cur_page);
	}

	/*
	 * Check that we didn't lose a page due to the arcane calling
	 * conventions.
	 */
	WARN_ON_ONCE(!ret && !list_empty(ctx.pages));
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_readpages);
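
/*
 * Illustrative sketch (not compiled): the matching ->readpages hook,
 * complementing the ->readpage sketch above. Hypothetical names again.
 */
#if 0
static int myfs_readpages(struct file *unused, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	return iomap_readpages(mapping, pages, nr_pages, &myfs_iomap_ops);
}
#endif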

/*
 * iomap_is_partially_uptodate checks whether blocks within a page are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to a file portion
 * we want to read within the page are uptodate.
 */
int
iomap_is_partially_uptodate(struct page *page, unsigned long from,
		unsigned long count)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned len, first, last;
	unsigned i;

	/* Limit range to one page */
	len = min_t(unsigned, PAGE_SIZE - from, count);

	/* First and last blocks in range within page */
	first = from >> inode->i_blkbits;
	last = (from + len - 1) >> inode->i_blkbits;

	if (iop) {
		for (i = first; i <= last; i++)
			if (!test_bit(i, iop->uptodate))
				return 0;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);

int
iomap_releasepage(struct page *page, gfp_t gfp_mask)
{
	/*
	 * mm accommodates an old ext3 case where clean pages might not have
	 * had the dirty bit cleared. Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(); skip those here.
	 */
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	iomap_page_release(page);
	return 1;
}
EXPORT_SYMBOL_GPL(iomap_releasepage);

void
iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
{
	/*
	 * If we are invalidating the entire page, clear the dirty state from
	 * it and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == PAGE_SIZE) {
		WARN_ON_ONCE(PageWriteback(page));
		cancel_dirty_page(page);
		iomap_page_release(page);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidatepage);

#ifdef CONFIG_MIGRATION
int
iomap_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	int ret;

	ret = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (page_has_private(page)) {
		ClearPagePrivate(page);
		get_page(newpage);
		set_page_private(newpage, page_private(page));
		set_page_private(page, 0);
		put_page(page);
		SetPagePrivate(newpage);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(iomap_migrate_page);
#endif /* CONFIG_MIGRATION */

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}

static int
iomap_read_page_sync(struct inode *inode, loff_t block_start, struct page *page,
		unsigned poff, unsigned plen, unsigned from, unsigned to,
		struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	if (iomap->type != IOMAP_MAPPED || block_start >= i_size_read(inode)) {
		zero_user_segments(page, poff, from, to, poff + plen);
		iomap_set_range_uptodate(page, poff, plen);
		return 0;
	}

	bio_init(&bio, &bvec, 1);
	bio.bi_opf = REQ_OP_READ;
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_set_dev(&bio, iomap->bdev);
	__bio_add_page(&bio, page, plen, poff);
	return submit_bio_wait(&bio);
}

static int
__iomap_write_begin(struct inode *inode, loff_t pos, unsigned len,
		struct page *page, struct iomap *iomap)
{
	struct iomap_page *iop = iomap_page_create(inode, page);
	loff_t block_size = i_blocksize(inode);
	loff_t block_start = pos & ~(block_size - 1);
	loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
	unsigned from = offset_in_page(pos), to = from + len, poff, plen;
	int status = 0;

	if (PageUptodate(page))
		return 0;

	do {
		iomap_adjust_read_range(inode, iop, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if ((from > poff && from < poff + plen) ||
		    (to > poff && to < poff + plen)) {
			status = iomap_read_page_sync(inode, block_start, page,
					poff, plen, from, to, iomap);
			if (status)
				break;
		}

	} while ((block_start += plen) < block_end);

	return status;
}

static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, struct iomap *iomap)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int status = 0;

	BUG_ON(pos + len > iomap->offset + iomap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
	if (!page)
		return -ENOMEM;

	if (iomap->type == IOMAP_INLINE)
		iomap_read_inline_data(inode, page, iomap);
	else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(page, pos, len, NULL, iomap);
	else
		status = __iomap_write_begin(inode, pos, len, page, iomap);
	if (unlikely(status)) {
		unlock_page(page);
		put_page(page);
		page = NULL;

		iomap_write_failed(inode, pos, len);
	}

	*pagep = page;
	return status;
}

int
iomap_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	/*
	 * Lock out page->mem_cgroup migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	if (newly_dirty)
		__set_page_dirty(page, mapping, 0);
	unlock_page_memcg(page);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}
EXPORT_SYMBOL_GPL(iomap_set_page_dirty);

static int
__iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page, struct iomap *iomap)
{
	flush_dcache_page(page);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a readpage reading them and overwriting a
	 * partial write. However if we have encountered a short write and only
	 * partially written into a block, it will not be marked uptodate, so a
	 * readpage might come in and destroy our partial write.
	 *
	 * Do the simplest thing, and just treat any short write to a
	 * non-uptodate page as a zero-length write, and force the caller to
	 * redo the whole thing.
	 */
	if (unlikely(copied < len && !PageUptodate(page))) {
		copied = 0;
	} else {
		iomap_set_range_uptodate(page, offset_in_page(pos), len);
		iomap_set_page_dirty(page);
	}
	return __generic_write_end(inode, pos, copied, page);
}

static int
iomap_write_end_inline(struct inode *inode, struct page *page,
		struct iomap *iomap, loff_t pos, unsigned copied)
{
	void *addr;

	WARN_ON_ONCE(!PageUptodate(page));
	BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));

	addr = kmap_atomic(page);
	memcpy(iomap->inline_data + pos, addr + pos, copied);
	kunmap_atomic(addr);

	mark_inode_dirty(inode);
	__generic_write_end(inode, pos, copied, page);
	return copied;
}

static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page, struct iomap *iomap)
{
	int ret;

	if (iomap->type == IOMAP_INLINE) {
		ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
	} else if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = generic_write_end(NULL, inode->i_mapping, pos, len,
				copied, page, NULL);
	} else {
		ret = __iomap_write_end(inode, pos, len, copied, page, iomap);
	}

	if (iomap->page_done)
		iomap->page_done(inode, pos, copied, page, iomap);

	if (ret < len)
		iomap_write_failed(inode, pos, len);
	return ret;
}

static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *i = data;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = AOP_FLAG_NOFS;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = offset_in_page(pos);
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(inode, pos, bytes, flags, &page,
				iomap);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(inode->i_mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

		flush_dcache_page(page);

		status = iomap_write_end(inode, pos, bytes, copied, page,
				iomap);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fall back here, we could livelock
			 * because not all segments in the iov can be copied
			 * at once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;
		length -= copied;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, written = 0;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter),
				IOMAP_WRITE, ops, iter, iomap_write_actor);
		if (ret <= 0)
			break;
		pos += ret;
		written += ret;
	}

	return written ? written : ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
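
/*
 * Illustrative sketch of a ->write_iter caller (not compiled, hypothetical
 * names): take the inode lock, run the generic checks, hand off to
 * iomap_file_buffered_write(), and let generic_write_sync() handle
 * O_SYNC/O_DSYNC semantics.
 */
#if 0
static ssize_t myfs_buffered_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = iomap_file_buffered_write(iocb, from, &myfs_iomap_ops);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#endif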

static struct page *
__iomap_read_page(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
	if (IS_ERR(page))
		return page;
	if (!PageUptodate(page)) {
		put_page(page);
		return ERR_PTR(-EIO);
	}
	return page;
}

static loff_t
iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	long status = 0;
	ssize_t written = 0;

	do {
		struct page *page, *rpage;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */

		offset = offset_in_page(pos);
		bytes = min_t(loff_t, PAGE_SIZE - offset, length);

		rpage = __iomap_read_page(inode, pos);
		if (IS_ERR(rpage))
			return PTR_ERR(rpage);

		status = iomap_write_begin(inode, pos, bytes,
					   AOP_FLAG_NOFS, &page, iomap);
		put_page(rpage);
		if (unlikely(status))
			return status;

		WARN_ON_ONCE(!PageUptodate(page));

		status = iomap_write_end(inode, pos, bytes, bytes, page, iomap);
		if (unlikely(status <= 0)) {
			if (WARN_ON_ONCE(status == 0))
				return -EIO;
			return status;
		}

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (length);

	return written;
}

int
iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len) {
		ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
				iomap_dirty_actor);
		if (ret <= 0)
			return ret;
		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_file_dirty);

static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
		unsigned bytes, struct iomap *iomap)
{
	struct page *page;
	int status;

	status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
			iomap);
	if (status)
		return status;

	zero_user(page, offset, bytes);
	mark_page_accessed(page);

	return iomap_write_end(inode, pos, bytes, bytes, page, iomap);
}

static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
		struct iomap *iomap)
{
	return __dax_zero_page_range(iomap->bdev, iomap->dax_dev,
			iomap_sector(iomap, pos & PAGE_MASK), offset, bytes);
}

static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
		void *data, struct iomap *iomap)
{
	bool *did_zero = data;
	loff_t written = 0;
	int status;

	/* already zeroed? we're done. */
	if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
		return count;

	do {
		unsigned offset, bytes;

		offset = offset_in_page(pos);
		bytes = min_t(loff_t, PAGE_SIZE - offset, count);

		if (IS_DAX(inode))
			status = iomap_dax_zero(pos, offset, bytes, iomap);
		else
			status = iomap_zero(inode, pos, offset, bytes, iomap);
		if (status < 0)
			return status;

		pos += bytes;
		count -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (count > 0);

	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
				ops, did_zero, iomap_zero_range_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
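
/*
 * Illustrative sketch (not compiled, hypothetical names): a ->setattr path
 * that shrinks a file would zero the new partial EOF block before updating
 * the size.
 */
#if 0
	bool did_zero = false;
	int error;

	error = iomap_truncate_page(inode, newsize, &did_zero,
			&myfs_iomap_ops);
	if (error)
		return error;
	truncate_setsize(inode, newsize);
#endif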

static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct page *page = data;
	int ret;

	if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = __block_write_begin_int(page, pos, length, NULL, iomap);
		if (ret)
			return ret;
		block_commit_write(page, 0, length);
	} else {
		WARN_ON_ONCE(!PageUptodate(page));
		iomap_page_create(inode, page);
		set_page_dirty(page);
	}

	return length;
}

int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	unsigned long length;
	loff_t offset, size;
	ssize_t ret;

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* We overload EFAULT to mean page got truncated */
		ret = -EFAULT;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_SHIFT) > size)
		length = offset_in_page(size);
	else
		length = PAGE_SIZE;

	offset = page_offset(page);
	while (length > 0) {
		ret = iomap_apply(inode, offset, length,
				IOMAP_WRITE | IOMAP_FAULT, ops, page,
				iomap_page_mkwrite_actor);
		if (unlikely(ret <= 0))
			goto out_unlock;
		offset += ret;
		length -= ret;
	}

	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;
out_unlock:
	unlock_page(page);
	return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
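
/*
 * Illustrative sketch of a ->page_mkwrite handler (not compiled,
 * hypothetical names); the filesystem typically brackets the call with the
 * usual pagefault accounting and timestamp update.
 */
#if 0
static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vmf->vma->vm_file);
	ret = iomap_page_mkwrite(vmf, &myfs_iomap_ops);
	sb_end_pagefault(inode->i_sb);
	return ret;
}
#endif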

struct fiemap_ctx {
	struct fiemap_extent_info *fi;
	struct iomap prev;
};

static int iomap_to_fiemap(struct fiemap_extent_info *fi,
		struct iomap *iomap, u32 flags)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		/* skip holes */
		return 0;
	case IOMAP_DELALLOC:
		flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
		break;
	case IOMAP_MAPPED:
		break;
	case IOMAP_UNWRITTEN:
		flags |= FIEMAP_EXTENT_UNWRITTEN;
		break;
	case IOMAP_INLINE:
		flags |= FIEMAP_EXTENT_DATA_INLINE;
		break;
	}

	if (iomap->flags & IOMAP_F_MERGED)
		flags |= FIEMAP_EXTENT_MERGED;
	if (iomap->flags & IOMAP_F_SHARED)
		flags |= FIEMAP_EXTENT_SHARED;

	return fiemap_fill_next_extent(fi, iomap->offset,
			iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0,
			iomap->length, flags);
}

static loff_t
iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct fiemap_ctx *ctx = data;
	loff_t ret = length;

	if (iomap->type == IOMAP_HOLE)
		return length;

	ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
	ctx->prev = *iomap;
	switch (ret) {
	case 0:		/* success */
		return length;
	case 1:		/* extent array full */
		return 0;
	default:
		return ret;
	}
}

int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
		loff_t start, loff_t len, const struct iomap_ops *ops)
{
	struct fiemap_ctx ctx;
	loff_t ret;

	memset(&ctx, 0, sizeof(ctx));
	ctx.fi = fi;
	ctx.prev.type = IOMAP_HOLE;

	ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
		ret = filemap_write_and_wait(inode->i_mapping);
		if (ret)
			return ret;
	}

	while (len > 0) {
		ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
				iomap_fiemap_actor);
		/* inode with no (attribute) mapping will give ENOENT */
		if (ret == -ENOENT)
			break;
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		start += ret;
		len -= ret;
	}

	if (ctx.prev.type != IOMAP_HOLE) {
		ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_fiemap);
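
/*
 * Illustrative sketch (not compiled, hypothetical names): an
 * inode_operations ->fiemap hook is usually a thin wrapper.
 */
#if 0
static int myfs_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
		u64 start, u64 len)
{
	return iomap_fiemap(inode, fi, start, len, &myfs_iomap_ops);
}
#endif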

/*
 * Seek for SEEK_DATA / SEEK_HOLE within @page, starting at @lastoff.
 * Returns true if found and updates @lastoff to the offset in file.
 */
static bool
page_seek_hole_data(struct inode *inode, struct page *page, loff_t *lastoff,
		int whence)
{
	const struct address_space_operations *ops = inode->i_mapping->a_ops;
	unsigned int bsize = i_blocksize(inode), off;
	bool seek_data = whence == SEEK_DATA;
	loff_t poff = page_offset(page);

	if (WARN_ON_ONCE(*lastoff >= poff + PAGE_SIZE))
		return false;

	if (*lastoff < poff) {
		/*
		 * Last offset smaller than the start of the page means we
		 * found a hole:
		 */
		if (whence == SEEK_HOLE)
			return true;
		*lastoff = poff;
	}

	/*
	 * Just check the page unless we can and should check block ranges:
	 */
	if (bsize == PAGE_SIZE || !ops->is_partially_uptodate)
		return PageUptodate(page) == seek_data;

	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping))
		goto out_unlock_not_found;

	for (off = 0; off < PAGE_SIZE; off += bsize) {
		if (offset_in_page(*lastoff) >= off + bsize)
			continue;
		if (ops->is_partially_uptodate(page, off, bsize) == seek_data) {
			unlock_page(page);
			return true;
		}
		*lastoff = poff + off + bsize;
	}

out_unlock_not_found:
	unlock_page(page);
	return false;
}

/*
 * Seek for SEEK_DATA / SEEK_HOLE in the page cache.
 *
 * Within unwritten extents, the page cache determines which parts are holes
 * and which are data: uptodate buffer heads count as data; everything else
 * counts as a hole.
 *
 * Returns the resulting offset on success, and -ENOENT otherwise.
 */
static loff_t
page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length,
		int whence)
{
	pgoff_t index = offset >> PAGE_SHIFT;
	pgoff_t end = DIV_ROUND_UP(offset + length, PAGE_SIZE);
	loff_t lastoff = offset;
	struct pagevec pvec;

	if (length <= 0)
		return -ENOENT;

	pagevec_init(&pvec);

	do {
		unsigned nr_pages, i;

		nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping, &index,
						end - 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page_seek_hole_data(inode, page, &lastoff, whence))
				goto check_range;
			lastoff = page_offset(page) + PAGE_SIZE;
		}
		pagevec_release(&pvec);
	} while (index < end);

	/* When there is no page at lastoff and we are not done, we found a hole. */
	if (whence != SEEK_HOLE)
		goto not_found;

check_range:
	if (lastoff < offset + length)
		goto out;
not_found:
	lastoff = -ENOENT;
out:
	pagevec_release(&pvec);
	return lastoff;
}

static loff_t
iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length,
		void *data, struct iomap *iomap)
{
	switch (iomap->type) {
	case IOMAP_UNWRITTEN:
		offset = page_cache_seek_hole_data(inode, offset, length,
						   SEEK_HOLE);
		if (offset < 0)
			return length;
		/* fall through */
	case IOMAP_HOLE:
		*(loff_t *)data = offset;
		return 0;
	default:
		return length;
	}
}

loff_t
iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
	loff_t size = i_size_read(inode);
	loff_t length = size - offset;
	loff_t ret;

	/* Nothing to be found before or beyond the end of the file. */
	if (offset < 0 || offset >= size)
		return -ENXIO;

	while (length > 0) {
		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
				&offset, iomap_seek_hole_actor);
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		offset += ret;
		length -= ret;
	}

	return offset;
}
EXPORT_SYMBOL_GPL(iomap_seek_hole);

static loff_t
iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length,
		void *data, struct iomap *iomap)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		return length;
	case IOMAP_UNWRITTEN:
		offset = page_cache_seek_hole_data(inode, offset, length,
						   SEEK_DATA);
		if (offset < 0)
			return length;
		/* fall through */
	default:
		*(loff_t *)data = offset;
		return 0;
	}
}

loff_t
iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
	loff_t size = i_size_read(inode);
	loff_t length = size - offset;
	loff_t ret;

	/* Nothing to be found before or beyond the end of the file. */
	if (offset < 0 || offset >= size)
		return -ENXIO;

	while (length > 0) {
		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
				&offset, iomap_seek_data_actor);
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		offset += ret;
		length -= ret;
	}

	if (length <= 0)
		return -ENXIO;
	return offset;
}
EXPORT_SYMBOL_GPL(iomap_seek_data);
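
/*
 * Illustrative sketch of a ->llseek implementation using the two helpers
 * above (not compiled, hypothetical names); anything other than
 * SEEK_HOLE/SEEK_DATA falls back to the generic code.
 */
#if 0
static loff_t myfs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file_inode(file);

	switch (whence) {
	case SEEK_HOLE:
		offset = iomap_seek_hole(inode, offset, &myfs_iomap_ops);
		break;
	case SEEK_DATA:
		offset = iomap_seek_data(inode, offset, &myfs_iomap_ops);
		break;
	default:
		return generic_file_llseek(file, offset, whence);
	}
	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}
#endif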

/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE_FUA	(1 << 28)
#define IOMAP_DIO_NEED_SYNC	(1 << 29)
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY		(1 << 31)

struct iomap_dio {
	struct kiocb		*iocb;
	iomap_dio_end_io_t	*end_io;
	loff_t			i_size;
	loff_t			size;
	atomic_t		ref;
	unsigned		flags;
	int			error;
	bool			wait_for_completion;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
			struct request_queue	*last_queue;
			blk_qc_t		cookie;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct	work;
		} aio;
	};
};

static ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	struct kiocb *iocb = dio->iocb;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	ssize_t ret;

	if (dio->end_io) {
		ret = dio->end_io(iocb,
				dio->error ? dio->error : dio->size,
				dio->flags);
	} else {
		ret = dio->error;
	}

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (offset + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - offset;
		iocb->ki_pos += ret;
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the
	 * source of the write was an mmap'ed region of the file we're writing.
	 * Either one is a pretty crazy thing to do, so we don't support it
	 * 100%. If this invalidation fails, tough, the write still worked...
	 *
	 * And this page cache invalidation has to be after dio->end_io(), as
	 * some filesystems convert unwritten extents to real allocations in
	 * end_io() when necessary, otherwise a racing buffer read would cache
	 * zeros from unwritten extents.
	 */
	if (!dio->error &&
	    (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
		int err;

		err = invalidate_inode_pages2_range(inode->i_mapping,
				offset >> PAGE_SHIFT,
				(offset + dio->size - 1) >> PAGE_SHIFT);
		if (err)
			dio_warn_stale_pagecache(iocb->ki_filp);
	}

	/*
	 * If this is a DSYNC write, make sure we push it to stable storage now
	 * that we've written data.
	 */
	if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
		ret = generic_write_sync(iocb, ret);

	inode_dio_end(file_inode(iocb->ki_filp));
	kfree(dio);

	return ret;
}

static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;

	iocb->ki_complete(iocb, iomap_dio_complete(dio), 0);
}

/*
 * Set an error in the dio if none is set yet. We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}

static void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_status)
		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));

	if (atomic_dec_and_test(&dio->ref)) {
		if (dio->wait_for_completion) {
			struct task_struct *waiter = dio->submit.waiter;

			WRITE_ONCE(dio->submit.waiter, NULL);
			wake_up_process(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		struct bio_vec *bvec;
		int i;

		bio_for_each_segment_all(bvec, bio, i)
			put_page(bvec->bv_page);
		bio_put(bio);
	}
}

static blk_qc_t
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
		unsigned len)
{
	struct page *page = ZERO_PAGE(0);
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, 1);
	bio_set_dev(bio, iomap->bdev);
	bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	get_page(page);
	__bio_add_page(bio, page, len, 0);
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);

	atomic_inc(&dio->ref);
	return submit_bio(bio);
}

static loff_t
iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
		struct iomap_dio *dio, struct iomap *iomap)
{
	unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
	unsigned int fs_block_size = i_blocksize(inode), pad;
	unsigned int align = iov_iter_alignment(dio->submit.iter);
	struct iov_iter iter;
	struct bio *bio;
	bool need_zeroout = false;
	bool use_fua = false;
	int nr_pages, ret = 0;
	size_t copied = 0;

	if ((pos | length | align) & ((1 << blkbits) - 1))
		return -EINVAL;

	if (iomap->type == IOMAP_UNWRITTEN) {
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
	}

	if (iomap->flags & IOMAP_F_SHARED)
		dio->flags |= IOMAP_DIO_COW;

	if (iomap->flags & IOMAP_F_NEW) {
		need_zeroout = true;
	} else if (iomap->type == IOMAP_MAPPED) {
		/*
		 * Use a FUA write if we need datasync semantics, this is a pure
		 * data IO that doesn't require any metadata updates (including
		 * after IO completion such as unwritten extent conversion) and
		 * the underlying device supports FUA. This allows us to avoid
		 * cache flushes on IO completion.
		 */
		if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
		    (dio->flags & IOMAP_DIO_WRITE_FUA) &&
		    blk_queue_fua(bdev_get_queue(iomap->bdev)))
			use_fua = true;
	}

	/*
	 * Operate on a partial iter trimmed to the extent we were called for.
	 * We'll update the iter in the dio once we're done with this extent.
	 */
	iter = *dio->submit.iter;
	iov_iter_truncate(&iter, length);

	nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
	if (nr_pages <= 0)
		return nr_pages;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos - pad, pad);
	}

	do {
		size_t n;

		if (dio->error) {
			iov_iter_revert(dio->submit.iter, copied);
			return 0;
		}

		bio = bio_alloc(GFP_KERNEL, nr_pages);
		bio_set_dev(bio, iomap->bdev);
		bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
		bio->bi_write_hint = dio->iocb->ki_hint;
		bio->bi_ioprio = dio->iocb->ki_ioprio;
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, &iter);
		if (unlikely(ret)) {
			/*
			 * We have to stop part way through an IO. We must fall
			 * through to the sub-block tail zeroing here, otherwise
			 * this short IO may expose stale data in the tail of
			 * the block we haven't written data to.
			 */
			bio_put(bio);
			goto zero_tail;
		}

		n = bio->bi_iter.bi_size;
		if (dio->flags & IOMAP_DIO_WRITE) {
			bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
			if (use_fua)
				bio->bi_opf |= REQ_FUA;
			else
				dio->flags &= ~IOMAP_DIO_WRITE_FUA;
			task_io_account_write(n);
		} else {
			bio->bi_opf = REQ_OP_READ;
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		iov_iter_advance(dio->submit.iter, n);

		dio->size += n;
		pos += n;
		copied += n;

		nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);

		atomic_inc(&dio->ref);

		dio->submit.last_queue = bdev_get_queue(iomap->bdev);
		dio->submit.cookie = submit_bio(bio);
	} while (nr_pages);

	/*
	 * We need to zero out the tail of a sub-block write if the extent type
	 * requires zeroing or the write extends beyond EOF. If we don't zero
	 * the block tail in the latter case, we can expose stale data via mmap
	 * reads of the EOF block.
	 */
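	/*
	 * Worked example (4k filesystem blocks): a write that ends at
	 * pos = 6k leaves pad = 6144 & 4095 = 2048, so the tail range
	 * [6k, 8k) of the final block is zeroed below.
	 */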
zero_tail:
	if (need_zeroout ||
	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
	}
	return copied ? copied : ret;
}

static loff_t
iomap_dio_hole_actor(loff_t length, struct iomap_dio *dio)
{
	length = iov_iter_zero(length, dio->submit.iter);
	dio->size += length;
	return length;
}

static loff_t
iomap_dio_inline_actor(struct inode *inode, loff_t pos, loff_t length,
		struct iomap_dio *dio, struct iomap *iomap)
{
	struct iov_iter *iter = dio->submit.iter;
	size_t copied;

	BUG_ON(pos + length > PAGE_SIZE - offset_in_page(iomap->inline_data));

	if (dio->flags & IOMAP_DIO_WRITE) {
		loff_t size = inode->i_size;

		if (pos > size)
			memset(iomap->inline_data + size, 0, pos - size);
		copied = copy_from_iter(iomap->inline_data + pos, length, iter);
		if (copied) {
			if (pos + copied > size)
				i_size_write(inode, pos + copied);
			mark_inode_dirty(inode);
		}
	} else {
		copied = copy_to_iter(iomap->inline_data + pos, length, iter);
	}
	dio->size += copied;
	return copied;
}

static loff_t
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_dio *dio = data;

	switch (iomap->type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		return iomap_dio_hole_actor(length, dio);
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE))
			return iomap_dio_hole_actor(length, dio);
		return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
	case IOMAP_MAPPED:
		return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
	case IOMAP_INLINE:
		return iomap_dio_inline_actor(inode, pos, length, dio, iomap);
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}
1804
1805 /*
1806 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the IO
1807 * is being issued as AIO or not. This allows us to optimise pure data writes
1808 * to use REQ_FUA rather than requiring generic_write_sync() to issue a
1809 * REQ_FLUSH post write. This is slightly tricky because a single request here
1810 * can be mapped into multiple disjoint IOs and only a subset of the IOs issued
1811 * may be pure data writes. In that case, we still need to do a full data sync
1812 * completion.
1813 */
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = file_inode(iocb->ki_filp);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos, start = pos;
	loff_t end = iocb->ki_pos + count - 1, ret = 0;
	unsigned int flags = IOMAP_DIRECT;
	bool wait_for_completion = is_sync_kiocb(iocb);
	struct blk_plug plug;
	struct iomap_dio *dio;

	lockdep_assert_held(&inode->i_rwsem);

	if (!count)
		return 0;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return -ENOMEM;

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->end_io = end_io;
	dio->error = 0;
	dio->flags = 0;

	dio->submit.iter = iter;
	dio->submit.waiter = current;
	dio->submit.cookie = BLK_QC_T_NONE;
	dio->submit.last_queue = NULL;

	if (iov_iter_rw(iter) == READ) {
		if (pos >= dio->i_size)
			goto out_free_dio;

		if (iter->type == ITER_IOVEC)
			dio->flags |= IOMAP_DIO_DIRTY;
	} else {
		flags |= IOMAP_WRITE;
		dio->flags |= IOMAP_DIO_WRITE;

		/* for data sync or sync, we need sync completion processing */
		if (iocb->ki_flags & IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_NEED_SYNC;

		/*
		 * For datasync only writes, we optimistically try using FUA for
		 * this IO.  Any non-FUA write that occurs will clear this flag,
		 * hence we know before completion whether a cache flush is
		 * necessary.
		 */
		if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_WRITE_FUA;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (filemap_range_has_page(mapping, start, end)) {
			ret = -EAGAIN;
			goto out_free_dio;
		}
		flags |= IOMAP_NOWAIT;
	}

	ret = filemap_write_and_wait_range(mapping, start, end);
	if (ret)
		goto out_free_dio;

	/*
	 * Try to invalidate cache pages for the range we're direct
	 * writing.  If this invalidation fails, tough, the write will
	 * still work, but racing two incompatible write paths is a
	 * pretty crazy thing to do, so we don't support it 100%.
	 */
	ret = invalidate_inode_pages2_range(mapping,
			start >> PAGE_SHIFT, end >> PAGE_SHIFT);
	if (ret)
		dio_warn_stale_pagecache(iocb->ki_filp);
	ret = 0;

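	/*
	 * Async writes that need sync completion processing are finished from
	 * the superblock's dio completion workqueue, since that work may
	 * block; make sure the workqueue exists before issuing any I/O.
	 */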
	if (iov_iter_rw(iter) == WRITE && !wait_for_completion &&
	    !inode->i_sb->s_dio_done_wq) {
		ret = sb_init_dio_done_wq(inode->i_sb);
		if (ret < 0)
			goto out_free_dio;
	}

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	do {
		ret = iomap_apply(inode, pos, count, flags, ops, dio,
				iomap_dio_actor);
		if (ret <= 0) {
			/* magic error code to fall back to buffered I/O */
			if (ret == -ENOTBLK) {
				wait_for_completion = true;
				ret = 0;
			}
			break;
		}
		pos += ret;

		if (iov_iter_rw(iter) == READ && pos >= dio->i_size) {
			/*
			 * We only report that we've read data up to i_size.
			 * Revert iter to a state corresponding to that as
			 * some callers (such as splice code) rely on it.
			 */
			iov_iter_revert(iter, pos - dio->i_size);
			break;
		}
	} while ((count = iov_iter_count(iter)) > 0);
	blk_finish_plug(&plug);

	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	/*
	 * If all the writes we issued were FUA, we don't need to flush the
	 * cache on IO completion.  Clear the sync flag for this case.
	 */
	if (dio->flags & IOMAP_DIO_WRITE_FUA)
		dio->flags &= ~IOMAP_DIO_NEED_SYNC;

	/*
	 * We are about to drop our additional submission reference, which
	 * might be the last reference to the dio.  There are three different
	 * ways we can progress here:
	 *
	 *  (a) If this is the last reference we will always complete and free
	 *	the dio ourselves.
	 *  (b) If this is not the last reference, and we serve an asynchronous
	 *	iocb, we must never touch the dio after the decrement, the
	 *	I/O completion handler will complete and free it.
	 *  (c) If this is not the last reference, but we serve a synchronous
	 *	iocb, the I/O completion handler will wake us up on the drop
	 *	of the final reference, and we will complete and free it here
	 *	after we got woken by the I/O completion handler.
	 */
	dio->wait_for_completion = wait_for_completion;
	if (!atomic_dec_and_test(&dio->ref)) {
		if (!wait_for_completion)
			return -EIOCBQUEUED;

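		/*
		 * Synchronous I/O: sleep until the completion handler drops
		 * the final reference and clears submit.waiter.  For polled
		 * (IOCB_HIPRI) I/O, spin on blk_poll() instead of sleeping.
		 */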
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			if (!(iocb->ki_flags & IOCB_HIPRI) ||
			    !dio->submit.last_queue ||
			    !blk_poll(dio->submit.last_queue,
					dio->submit.cookie))
				io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	return iomap_dio_complete(dio);

out_free_dio:
	kfree(dio);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
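
/*
 * A minimal sketch of a caller, for illustration only: a filesystem's
 * ->read_iter method might drive direct I/O through iomap_dio_rw() roughly
 * as below.  "example_iomap_ops" is a hypothetical name standing in for the
 * filesystem's own iomap_ops; the end_io callback may be NULL.  Note that
 * the caller is responsible for holding inode->i_rwsem, as asserted above.
 *
 *	static ssize_t example_dio_read(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		if (!iov_iter_count(to))
 *			return 0;
 *
 *		inode_lock_shared(inode);
 *		ret = iomap_dio_rw(iocb, to, &example_iomap_ops, NULL);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 */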

/* Swapfile activation */

#ifdef CONFIG_SWAP
struct iomap_swapfile_info {
	struct iomap iomap;		/* accumulated iomap */
	struct swap_info_struct *sis;
	uint64_t lowest_ppage;		/* lowest physical addr seen (pages) */
	uint64_t highest_ppage;		/* highest physical addr seen (pages) */
	unsigned long nr_pages;		/* number of pages collected */
	int nr_extents;			/* extent count */
};

/*
 * Collect physical extents for this swap file.  Physical extents reported to
 * the swap code must be trimmed to align to a page boundary.  The logical
 * offset within the file is irrelevant since the swapfile code maps logical
 * page numbers of the swap device to the physical page-aligned extents.
 */
static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi)
{
	struct iomap *iomap = &isi->iomap;
	unsigned long nr_pages;
	uint64_t first_ppage;
	uint64_t first_ppage_reported;
	uint64_t next_ppage;
	int error;

	/*
	 * Round the start up and the end down so that the physical
	 * extent aligns to a page boundary.
	 */
	first_ppage = ALIGN(iomap->addr, PAGE_SIZE) >> PAGE_SHIFT;
	next_ppage = ALIGN_DOWN(iomap->addr + iomap->length, PAGE_SIZE) >>
			PAGE_SHIFT;
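
	/*
	 * Worked example (illustrative, assuming 4096-byte pages): a physical
	 * extent at addr 0x1200 with length 0x3000 covers bytes
	 * [0x1200, 0x4200).  Rounding inward gives first_ppage = 2 (0x2000)
	 * and next_ppage = 4 (0x4000), i.e. two whole pages; the partial
	 * pages at either end are trimmed off.
	 */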

	/* Skip too-short physical extents. */
	if (first_ppage >= next_ppage)
		return 0;
	nr_pages = next_ppage - first_ppage;

	/*
	 * Calculate how much swap space we're adding; the first page contains
	 * the swap header and doesn't count.  The mm still wants that first
	 * page fed to add_swap_extent, however.
	 */
	first_ppage_reported = first_ppage;
	if (iomap->offset == 0)
		first_ppage_reported++;
	if (isi->lowest_ppage > first_ppage_reported)
		isi->lowest_ppage = first_ppage_reported;
	if (isi->highest_ppage < (next_ppage - 1))
		isi->highest_ppage = next_ppage - 1;

	/* Add extent, set up for the next call. */
	error = add_swap_extent(isi->sis, isi->nr_pages, nr_pages, first_ppage);
	if (error < 0)
		return error;
	isi->nr_extents += error;
	isi->nr_pages += nr_pages;
	return 0;
}

/*
 * Accumulate iomaps for this swap file.  We have to accumulate iomaps because
 * swap only cares about contiguous page-aligned physical extents and makes no
 * distinction between written and unwritten extents.
 */
static loff_t iomap_swapfile_activate_actor(struct inode *inode, loff_t pos,
		loff_t count, void *data, struct iomap *iomap)
{
	struct iomap_swapfile_info *isi = data;
	int error;

	switch (iomap->type) {
	case IOMAP_MAPPED:
	case IOMAP_UNWRITTEN:
		/* Only real or unwritten extents. */
		break;
	case IOMAP_INLINE:
		/* No inline data. */
		pr_err("swapon: file is inline\n");
		return -EINVAL;
	default:
		pr_err("swapon: file has unallocated extents\n");
		return -EINVAL;
	}

	/* No uncommitted metadata or shared blocks. */
	if (iomap->flags & IOMAP_F_DIRTY) {
		pr_err("swapon: file is not committed\n");
		return -EINVAL;
	}
	if (iomap->flags & IOMAP_F_SHARED) {
		pr_err("swapon: file has shared extents\n");
		return -EINVAL;
	}

	/* Only one bdev per swap file. */
	if (iomap->bdev != isi->sis->bdev) {
		pr_err("swapon: file is on multiple devices\n");
		return -EINVAL;
	}

	if (isi->iomap.length == 0) {
		/* No accumulated extent, so just store it. */
		memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
	} else if (isi->iomap.addr + isi->iomap.length == iomap->addr) {
		/* Append this to the accumulated extent. */
		isi->iomap.length += iomap->length;
	} else {
		/* Otherwise, add the retained iomap and store this one. */
		error = iomap_swapfile_add_extent(isi);
		if (error)
			return error;
		memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
	}
	return count;
}

/*
 * Iterate a swap file's iomaps to construct physical extents that can be
 * passed to the swapfile subsystem.
 */
int iomap_swapfile_activate(struct swap_info_struct *sis,
		struct file *swap_file, sector_t *pagespan,
		const struct iomap_ops *ops)
{
	struct iomap_swapfile_info isi = {
		.sis = sis,
		.lowest_ppage = (sector_t)-1ULL,
	};
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = 0;
	loff_t len = ALIGN_DOWN(i_size_read(inode), PAGE_SIZE);
	loff_t ret;

	/*
	 * Persist all file mapping metadata so that we won't have any
	 * IOMAP_F_DIRTY iomaps.
	 */
	ret = vfs_fsync(swap_file, 1);
	if (ret)
		return ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_REPORT,
				ops, &isi, iomap_swapfile_activate_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	if (isi.iomap.length) {
		ret = iomap_swapfile_add_extent(&isi);
		if (ret)
			return ret;
	}

	*pagespan = 1 + isi.highest_ppage - isi.lowest_ppage;
	sis->max = isi.nr_pages;
	sis->pages = isi.nr_pages - 1;
	sis->highest_bit = isi.nr_pages - 1;
	return isi.nr_extents;
}
EXPORT_SYMBOL_GPL(iomap_swapfile_activate);
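
/*
 * A sketch of how a filesystem might wire this up, for illustration only:
 * the ->swap_activate address_space operation forwards to
 * iomap_swapfile_activate() with the filesystem's own iomap_ops.
 * "example_iomap_ops" is a hypothetical name.
 *
 *	static int example_swap_activate(struct swap_info_struct *sis,
 *			struct file *swap_file, sector_t *span)
 *	{
 *		return iomap_swapfile_activate(sis, swap_file, span,
 *				&example_iomap_ops);
 *	}
 */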
#endif /* CONFIG_SWAP */

static loff_t
iomap_bmap_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	sector_t *bno = data, addr;

	if (iomap->type == IOMAP_MAPPED) {
		addr = (pos - iomap->offset + iomap->addr) >> inode->i_blkbits;
		if (addr > INT_MAX)
			WARN(1, "would truncate bmap result\n");
		else
			*bno = addr;
	}
	return 0;
}

/* legacy ->bmap interface.  0 is the error return (!) */
sector_t
iomap_bmap(struct address_space *mapping, sector_t bno,
		const struct iomap_ops *ops)
{
	struct inode *inode = mapping->host;
	loff_t pos = bno << inode->i_blkbits;
	unsigned blocksize = i_blocksize(inode);

	if (filemap_write_and_wait(mapping))
		return 0;

	bno = 0;
	iomap_apply(inode, pos, blocksize, 0, ops, &bno, iomap_bmap_actor);
	return bno;
}
EXPORT_SYMBOL_GPL(iomap_bmap);
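
/*
 * A minimal sketch of a caller, for illustration only: a filesystem's
 * ->bmap address_space operation would forward here with its own iomap_ops.
 * "example_iomap_ops" is a hypothetical name.
 *
 *	static sector_t example_bmap(struct address_space *mapping,
 *			sector_t block)
 *	{
 *		return iomap_bmap(mapping, block, &example_iomap_ops);
 *	}
 */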