1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4  * Copyright (c) 2012 Red Hat, Inc.
5  * All Rights Reserved.
6  */
7 #include "xfs.h"
8 #include "xfs_fs.h"
9 #include "xfs_shared.h"
10 #include "xfs_format.h"
11 #include "xfs_log_format.h"
12 #include "xfs_trans_resv.h"
13 #include "xfs_bit.h"
14 #include "xfs_mount.h"
15 #include "xfs_da_format.h"
16 #include "xfs_defer.h"
17 #include "xfs_inode.h"
18 #include "xfs_btree.h"
19 #include "xfs_trans.h"
20 #include "xfs_extfree_item.h"
21 #include "xfs_alloc.h"
22 #include "xfs_bmap.h"
23 #include "xfs_bmap_util.h"
24 #include "xfs_bmap_btree.h"
25 #include "xfs_rtalloc.h"
26 #include "xfs_error.h"
27 #include "xfs_quota.h"
28 #include "xfs_trans_space.h"
29 #include "xfs_trace.h"
30 #include "xfs_icache.h"
31 #include "xfs_log.h"
32 #include "xfs_rmap_btree.h"
33 #include "xfs_iomap.h"
34 #include "xfs_reflink.h"
35 #include "xfs_refcount.h"
36 
37 /* Kernel only BMAP related definitions and functions */
38 
39 /*
40  * Convert the given file system block to a disk block.  We have to treat it
41  * differently based on whether the file is a real time file or not, because the
42  * bmap code does.
43  */
44 xfs_daddr_t
45 xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
46 {
47 	return (XFS_IS_REALTIME_INODE(ip) ? \
48 		 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \
49 		 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
50 }
51 
52 /*
53  * Routine to zero an extent on disk allocated to the specific inode.
54  *
55  * The VFS functions take a linearised filesystem block offset, so we have to
56  * convert the sparse xfs fsb to the right format first.
57  * VFS types are real funky, too.
58  */
59 int
60 xfs_zero_extent(
61 	struct xfs_inode *ip,
62 	xfs_fsblock_t	start_fsb,
63 	xfs_off_t	count_fsb)
64 {
65 	struct xfs_mount *mp = ip->i_mount;
66 	xfs_daddr_t	sector = xfs_fsb_to_db(ip, start_fsb);
67 	sector_t	block = XFS_BB_TO_FSBT(mp, sector);
68 
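	/*
	 * blkdev_issue_zeroout() takes 512-byte sector units, so shift the
	 * filesystem block values up by (blocksize_bits - 9).
	 */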
69 	return blkdev_issue_zeroout(xfs_find_bdev_for_inode(VFS_I(ip)),
70 		block << (mp->m_super->s_blocksize_bits - 9),
71 		count_fsb << (mp->m_super->s_blocksize_bits - 9),
72 		GFP_NOFS, 0);
73 }
74 
75 #ifdef CONFIG_XFS_RT
76 int
77 xfs_bmap_rtalloc(
78 	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
79 {
80 	int		error;		/* error return value */
81 	xfs_mount_t	*mp;		/* mount point structure */
82 	xfs_extlen_t	prod = 0;	/* product factor for allocators */
83 	xfs_extlen_t	mod = 0;	/* alignment remainder */
84 	xfs_extlen_t	ralen = 0;	/* realtime allocation length */
85 	xfs_extlen_t	align;		/* minimum allocation alignment */
86 	xfs_rtblock_t	rtb;
87 
88 	mp = ap->ip->i_mount;
89 	align = xfs_get_extsz_hint(ap->ip);
90 	prod = align / mp->m_sb.sb_rextsize;
91 	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
92 					align, 1, ap->eof, 0,
93 					ap->conv, &ap->offset, &ap->length);
94 	if (error)
95 		return error;
96 	ASSERT(ap->length);
97 	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
98 
99 	/*
100 	 * If the offset & length are not perfectly aligned
101 	 * then kill prod; it will just get us in trouble.
102 	 */
103 	div_u64_rem(ap->offset, align, &mod);
104 	if (mod || ap->length % align)
105 		prod = 1;
106 	/*
107 	 * Set ralen to be the actual requested length in rtextents.
108 	 */
109 	ralen = ap->length / mp->m_sb.sb_rextsize;
110 	/*
111 	 * If the old value was close enough to MAXEXTLEN that
112 	 * we rounded up to it, cut it back so it's valid again.
113 	 * Note that if it's a really large request (bigger than
114 	 * MAXEXTLEN), we don't hear about that number, and can't
115 	 * adjust the starting point to match it.
116 	 */
117 	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
118 		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
119 
120 	/*
121 	 * Lock out modifications to both the RT bitmap and summary inodes
122 	 */
123 	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
124 	xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
125 	xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
126 	xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);
127 
128 	/*
129 	 * If it's an allocation to an empty file at offset 0,
130 	 * pick an extent that will space things out in the rt area.
131 	 */
132 	if (ap->eof && ap->offset == 0) {
133 		xfs_rtblock_t rtx; /* realtime extent no */
134 
135 		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
136 		if (error)
137 			return error;
138 		ap->blkno = rtx * mp->m_sb.sb_rextsize;
139 	} else {
140 		ap->blkno = 0;
141 	}
142 
143 	xfs_bmap_adjacent(ap);
144 
145 	/*
146 	 * Realtime allocation, done through xfs_rtallocate_extent.
147 	 */
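	/* Convert the target block from filesystem blocks to realtime extents. */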
148 	do_div(ap->blkno, mp->m_sb.sb_rextsize);
149 	rtb = ap->blkno;
150 	ap->length = ralen;
151 	error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
152 				&ralen, ap->wasdel, prod, &rtb);
153 	if (error)
154 		return error;
155 
156 	ap->blkno = rtb;
157 	if (ap->blkno != NULLFSBLOCK) {
158 		ap->blkno *= mp->m_sb.sb_rextsize;
159 		ralen *= mp->m_sb.sb_rextsize;
160 		ap->length = ralen;
161 		ap->ip->i_d.di_nblocks += ralen;
162 		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
163 		if (ap->wasdel)
164 			ap->ip->i_delayed_blks -= ralen;
165 		/*
166 		 * Adjust the disk quota also. This was reserved
167 		 * earlier.
168 		 */
169 		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
170 			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
171 					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
172 
173 		/* Zero the extent if we were asked to do so */
174 		if (ap->datatype & XFS_ALLOC_USERDATA_ZERO) {
175 			error = xfs_zero_extent(ap->ip, ap->blkno, ap->length);
176 			if (error)
177 				return error;
178 		}
179 	} else {
180 		ap->length = 0;
181 	}
182 	return 0;
183 }
184 #endif /* CONFIG_XFS_RT */
185 
186 /*
187  * Check if the endoff is outside the last extent. If so the caller will grow
188  * the allocation to a stripe unit boundary.  All offsets are considered outside
189  * the end of file for an empty fork, so 1 is returned in *eof in that case.
190  */
191 int
192 xfs_bmap_eof(
193 	struct xfs_inode	*ip,
194 	xfs_fileoff_t		endoff,
195 	int			whichfork,
196 	int			*eof)
197 {
198 	struct xfs_bmbt_irec	rec;
199 	int			error;
200 
201 	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
202 	if (error || *eof)
203 		return error;
204 
205 	*eof = endoff >= rec.br_startoff + rec.br_blockcount;
206 	return 0;
207 }
208 
209 /*
210  * Extent tree block counting routines.
211  */
212 
213 /*
214  * Count leaf blocks given a range of extent records.  Delayed allocation
215  * extents are not counted towards the totals.
216  */
217 xfs_extnum_t
218 xfs_bmap_count_leaves(
219 	struct xfs_ifork	*ifp,
220 	xfs_filblks_t		*count)
221 {
222 	struct xfs_iext_cursor	icur;
223 	struct xfs_bmbt_irec	got;
224 	xfs_extnum_t		numrecs = 0;
225 
226 	for_each_xfs_iext(ifp, &icur, &got) {
227 		if (!isnullstartblock(got.br_startblock)) {
228 			*count += got.br_blockcount;
229 			numrecs++;
230 		}
231 	}
232 
233 	return numrecs;
234 }
235 
236 /*
237  * Count leaf blocks given a range of extent records originally
238  * in btree format.
239  */
240 STATIC void
241 xfs_bmap_disk_count_leaves(
242 	struct xfs_mount	*mp,
243 	struct xfs_btree_block	*block,
244 	int			numrecs,
245 	xfs_filblks_t		*count)
246 {
247 	int		b;
248 	xfs_bmbt_rec_t	*frp;
249 
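	/* On-disk bmbt record slots are indexed from 1. */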
250 	for (b = 1; b <= numrecs; b++) {
251 		frp = XFS_BMBT_REC_ADDR(mp, block, b);
252 		*count += xfs_bmbt_disk_get_blockcount(frp);
253 	}
254 }
255 
256 /*
257  * Recursively walks each level of a btree
258  * to count total fsblocks in use.
259  */
260 STATIC int
261 xfs_bmap_count_tree(
262 	struct xfs_mount	*mp,
263 	struct xfs_trans	*tp,
264 	struct xfs_ifork	*ifp,
265 	xfs_fsblock_t		blockno,
266 	int			levelin,
267 	xfs_extnum_t		*nextents,
268 	xfs_filblks_t		*count)
269 {
270 	int			error;
271 	struct xfs_buf		*bp, *nbp;
272 	int			level = levelin;
273 	__be64			*pp;
274 	xfs_fsblock_t           bno = blockno;
275 	xfs_fsblock_t		nextbno;
276 	struct xfs_btree_block	*block, *nextblock;
277 	int			numrecs;
278 
279 	error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
280 						&xfs_bmbt_buf_ops);
281 	if (error)
282 		return error;
283 	*count += 1;
284 	block = XFS_BUF_TO_BLOCK(bp);
285 
286 	if (--level) {
287 		/* Not at node above leaves, count this level of nodes */
288 		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
289 		while (nextbno != NULLFSBLOCK) {
290 			error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
291 						XFS_BMAP_BTREE_REF,
292 						&xfs_bmbt_buf_ops);
293 			if (error)
294 				return error;
295 			*count += 1;
296 			nextblock = XFS_BUF_TO_BLOCK(nbp);
297 			nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
298 			xfs_trans_brelse(tp, nbp);
299 		}
300 
301 		/* Dive to the next level */
302 		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
303 		bno = be64_to_cpu(*pp);
304 		error = xfs_bmap_count_tree(mp, tp, ifp, bno, level, nextents,
305 				count);
306 		if (error) {
307 			xfs_trans_brelse(tp, bp);
308 			XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
309 					 XFS_ERRLEVEL_LOW, mp);
310 			return -EFSCORRUPTED;
311 		}
312 		xfs_trans_brelse(tp, bp);
313 	} else {
314 		/* count all level 1 nodes and their leaves */
315 		for (;;) {
316 			nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
317 			numrecs = be16_to_cpu(block->bb_numrecs);
318 			(*nextents) += numrecs;
319 			xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
320 			xfs_trans_brelse(tp, bp);
321 			if (nextbno == NULLFSBLOCK)
322 				break;
323 			bno = nextbno;
324 			error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
325 						XFS_BMAP_BTREE_REF,
326 						&xfs_bmbt_buf_ops);
327 			if (error)
328 				return error;
329 			*count += 1;
330 			block = XFS_BUF_TO_BLOCK(bp);
331 		}
332 	}
333 	return 0;
334 }
335 
336 /*
337  * Count fsblocks of the given fork.  Delayed allocation extents are
338  * not counted towards the totals.
339  */
340 int
341 xfs_bmap_count_blocks(
342 	struct xfs_trans	*tp,
343 	struct xfs_inode	*ip,
344 	int			whichfork,
345 	xfs_extnum_t		*nextents,
346 	xfs_filblks_t		*count)
347 {
348 	struct xfs_mount	*mp;	/* file system mount structure */
349 	__be64			*pp;	/* pointer to block address */
350 	struct xfs_btree_block	*block;	/* current btree block */
351 	struct xfs_ifork	*ifp;	/* fork structure */
352 	xfs_fsblock_t		bno;	/* block # of "block" */
353 	int			level;	/* btree level, for checking */
354 	int			error;
355 
356 	bno = NULLFSBLOCK;
357 	mp = ip->i_mount;
358 	*nextents = 0;
359 	*count = 0;
360 	ifp = XFS_IFORK_PTR(ip, whichfork);
361 	if (!ifp)
362 		return 0;
363 
364 	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
365 	case XFS_DINODE_FMT_EXTENTS:
366 		*nextents = xfs_bmap_count_leaves(ifp, count);
367 		return 0;
368 	case XFS_DINODE_FMT_BTREE:
369 		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
370 			error = xfs_iread_extents(tp, ip, whichfork);
371 			if (error)
372 				return error;
373 		}
374 
375 		/*
376 		 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
377 		 */
378 		block = ifp->if_broot;
379 		level = be16_to_cpu(block->bb_level);
380 		ASSERT(level > 0);
381 		pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
382 		bno = be64_to_cpu(*pp);
383 		ASSERT(bno != NULLFSBLOCK);
384 		ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
385 		ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
386 
387 		error = xfs_bmap_count_tree(mp, tp, ifp, bno, level,
388 				nextents, count);
389 		if (error) {
390 			XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)",
391 					XFS_ERRLEVEL_LOW, mp);
392 			return -EFSCORRUPTED;
393 		}
394 		return 0;
395 	}
396 
397 	return 0;
398 }
399 
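/* Format a single bmbt record as a getbmapx entry in the output array. */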
400 static int
401 xfs_getbmap_report_one(
402 	struct xfs_inode	*ip,
403 	struct getbmapx		*bmv,
404 	struct kgetbmap		*out,
405 	int64_t			bmv_end,
406 	struct xfs_bmbt_irec	*got)
407 {
408 	struct kgetbmap		*p = out + bmv->bmv_entries;
409 	bool			shared = false, trimmed = false;
410 	int			error;
411 
412 	error = xfs_reflink_trim_around_shared(ip, got, &shared, &trimmed);
413 	if (error)
414 		return error;
415 
416 	if (isnullstartblock(got->br_startblock) ||
417 	    got->br_startblock == DELAYSTARTBLOCK) {
418 		/*
419 		 * Delalloc extents that start beyond EOF can occur due to
420 		 * speculative EOF allocation when the delalloc extent is larger
421 		 * than the largest freespace extent at conversion time.  These
422 		 * extents cannot be converted by data writeback, so can exist
423 		 * here even if we are not supposed to be finding delalloc
424 		 * extents.
425 		 */
426 		if (got->br_startoff < XFS_B_TO_FSB(ip->i_mount, XFS_ISIZE(ip)))
427 			ASSERT((bmv->bmv_iflags & BMV_IF_DELALLOC) != 0);
428 
429 		p->bmv_oflags |= BMV_OF_DELALLOC;
430 		p->bmv_block = -2;
431 	} else {
432 		p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
433 	}
434 
435 	if (got->br_state == XFS_EXT_UNWRITTEN &&
436 	    (bmv->bmv_iflags & BMV_IF_PREALLOC))
437 		p->bmv_oflags |= BMV_OF_PREALLOC;
438 
439 	if (shared)
440 		p->bmv_oflags |= BMV_OF_SHARED;
441 
442 	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
443 	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);
444 
445 	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
446 	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
447 	bmv->bmv_entries++;
448 	return 0;
449 }
450 
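/* Emit a getbmapx entry for a hole unless the caller passed BMV_IF_NO_HOLES. */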
451 static void
452 xfs_getbmap_report_hole(
453 	struct xfs_inode	*ip,
454 	struct getbmapx		*bmv,
455 	struct kgetbmap		*out,
456 	int64_t			bmv_end,
457 	xfs_fileoff_t		bno,
458 	xfs_fileoff_t		end)
459 {
460 	struct kgetbmap		*p = out + bmv->bmv_entries;
461 
462 	if (bmv->bmv_iflags & BMV_IF_NO_HOLES)
463 		return;
464 
465 	p->bmv_block = -1;
466 	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
467 	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);
468 
469 	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
470 	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
471 	bmv->bmv_entries++;
472 }
473 
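/*
 * Return true once the requested range is exhausted or the output array is
 * full; only bmv_count - 1 entries are ever filled.
 */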
474 static inline bool
475 xfs_getbmap_full(
476 	struct getbmapx		*bmv)
477 {
478 	return bmv->bmv_length == 0 || bmv->bmv_entries >= bmv->bmv_count - 1;
479 }
480 
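/*
 * Advance the record past the part that was just reported, returning false
 * once the whole extent (ending at total_end) has been consumed.
 */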
481 static bool
482 xfs_getbmap_next_rec(
483 	struct xfs_bmbt_irec	*rec,
484 	xfs_fileoff_t		total_end)
485 {
486 	xfs_fileoff_t		end = rec->br_startoff + rec->br_blockcount;
487 
488 	if (end == total_end)
489 		return false;
490 
491 	rec->br_startoff += rec->br_blockcount;
492 	if (!isnullstartblock(rec->br_startblock) &&
493 	    rec->br_startblock != DELAYSTARTBLOCK)
494 		rec->br_startblock += rec->br_blockcount;
495 	rec->br_blockcount = total_end - end;
496 	return true;
497 }
498 
499 /*
500  * Get inode's extents as described in bmv, and format for output.
501  * Fills the out array until all extents are mapped or until the passed-in
502  * bmv->bmv_count slots have been filled.
505  */
506 int						/* error code */
507 xfs_getbmap(
508 	struct xfs_inode	*ip,
509 	struct getbmapx		*bmv,		/* user bmap structure */
510 	struct kgetbmap		*out)
511 {
512 	struct xfs_mount	*mp = ip->i_mount;
513 	int			iflags = bmv->bmv_iflags;
514 	int			whichfork, lock, error = 0;
515 	int64_t			bmv_end, max_len;
516 	xfs_fileoff_t		bno, first_bno;
517 	struct xfs_ifork	*ifp;
518 	struct xfs_bmbt_irec	got, rec;
519 	xfs_filblks_t		len;
520 	struct xfs_iext_cursor	icur;
521 
522 	if (bmv->bmv_iflags & ~BMV_IF_VALID)
523 		return -EINVAL;
524 #ifndef DEBUG
525 	/* Only allow CoW fork queries if we're debugging. */
526 	if (iflags & BMV_IF_COWFORK)
527 		return -EINVAL;
528 #endif
529 	if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
530 		return -EINVAL;
531 
532 	if (bmv->bmv_length < -1)
533 		return -EINVAL;
534 	bmv->bmv_entries = 0;
535 	if (bmv->bmv_length == 0)
536 		return 0;
537 
538 	if (iflags & BMV_IF_ATTRFORK)
539 		whichfork = XFS_ATTR_FORK;
540 	else if (iflags & BMV_IF_COWFORK)
541 		whichfork = XFS_COW_FORK;
542 	else
543 		whichfork = XFS_DATA_FORK;
544 	ifp = XFS_IFORK_PTR(ip, whichfork);
545 
546 	xfs_ilock(ip, XFS_IOLOCK_SHARED);
547 	switch (whichfork) {
548 	case XFS_ATTR_FORK:
549 		if (!XFS_IFORK_Q(ip))
550 			goto out_unlock_iolock;
551 
552 		max_len = 1LL << 32;
553 		lock = xfs_ilock_attr_map_shared(ip);
554 		break;
555 	case XFS_COW_FORK:
556 		/* No CoW fork? Just return */
557 		if (!ifp)
558 			goto out_unlock_iolock;
559 
560 		if (xfs_get_cowextsz_hint(ip))
561 			max_len = mp->m_super->s_maxbytes;
562 		else
563 			max_len = XFS_ISIZE(ip);
564 
565 		lock = XFS_ILOCK_SHARED;
566 		xfs_ilock(ip, lock);
567 		break;
568 	case XFS_DATA_FORK:
569 		if (!(iflags & BMV_IF_DELALLOC) &&
570 		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
571 			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
572 			if (error)
573 				goto out_unlock_iolock;
574 
575 			/*
576 			 * Even after flushing the inode, there can still be
577 			 * delalloc blocks on the inode beyond EOF due to
578 			 * speculative preallocation.  These are not removed
579 			 * until the release function is called or the inode
580 			 * is inactivated.  Hence we cannot assert here that
581 			 * ip->i_delayed_blks == 0.
582 			 */
583 		}
584 
585 		if (xfs_get_extsz_hint(ip) ||
586 		    (ip->i_d.di_flags &
587 		     (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))
588 			max_len = mp->m_super->s_maxbytes;
589 		else
590 			max_len = XFS_ISIZE(ip);
591 
592 		lock = xfs_ilock_data_map_shared(ip);
593 		break;
594 	}
595 
596 	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
597 	case XFS_DINODE_FMT_EXTENTS:
598 	case XFS_DINODE_FMT_BTREE:
599 		break;
600 	case XFS_DINODE_FMT_LOCAL:
601 		/* Local format inode forks report no extents. */
602 		goto out_unlock_ilock;
603 	default:
604 		error = -EINVAL;
605 		goto out_unlock_ilock;
606 	}
607 
608 	if (bmv->bmv_length == -1) {
609 		max_len = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, max_len));
610 		bmv->bmv_length = max(0LL, max_len - bmv->bmv_offset);
611 	}
612 
613 	bmv_end = bmv->bmv_offset + bmv->bmv_length;
614 
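	/* getbmapx offsets and lengths are in 512-byte basic blocks. */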
615 	first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
616 	len = XFS_BB_TO_FSB(mp, bmv->bmv_length);
617 
618 	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
619 		error = xfs_iread_extents(NULL, ip, whichfork);
620 		if (error)
621 			goto out_unlock_ilock;
622 	}
623 
624 	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
625 		/*
626 		 * Report a whole-file hole if the delalloc flag is set to
627 		 * stay compatible with the old implementation.
628 		 */
629 		if (iflags & BMV_IF_DELALLOC)
630 			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
631 					XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
632 		goto out_unlock_ilock;
633 	}
634 
635 	while (!xfs_getbmap_full(bmv)) {
636 		xfs_trim_extent(&got, first_bno, len);
637 
638 		/*
639 		 * Report an entry for a hole if this extent doesn't directly
640 		 * follow the previous one.
641 		 */
642 		if (got.br_startoff > bno) {
643 			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
644 					got.br_startoff);
645 			if (xfs_getbmap_full(bmv))
646 				break;
647 		}
648 
649 		/*
650 		 * In order to report shared extents accurately, we report each
651 		 * distinct shared / unshared part of a single bmbt record with
652 		 * an individual getbmapx record.
653 		 */
654 		bno = got.br_startoff + got.br_blockcount;
655 		rec = got;
656 		do {
657 			error = xfs_getbmap_report_one(ip, bmv, out, bmv_end,
658 					&rec);
659 			if (error || xfs_getbmap_full(bmv))
660 				goto out_unlock_ilock;
661 		} while (xfs_getbmap_next_rec(&rec, bno));
662 
663 		if (!xfs_iext_next_extent(ifp, &icur, &got)) {
664 			xfs_fileoff_t	end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
665 
666 			out[bmv->bmv_entries - 1].bmv_oflags |= BMV_OF_LAST;
667 
668 			if (whichfork != XFS_ATTR_FORK && bno < end &&
669 			    !xfs_getbmap_full(bmv)) {
670 				xfs_getbmap_report_hole(ip, bmv, out, bmv_end,
671 						bno, end);
672 			}
673 			break;
674 		}
675 
676 		if (bno >= first_bno + len)
677 			break;
678 	}
679 
680 out_unlock_ilock:
681 	xfs_iunlock(ip, lock);
682 out_unlock_iolock:
683 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
684 	return error;
685 }
686 
687 /*
688  * Dead simple method of punching delayed allocation blocks from a range in
689  * the inode.  This will always punch out both the start and end blocks, even
690  * if the range only partially overlaps them, so it is up to the caller to
691  * ensure that partial blocks are not passed in.
692  */
693 int
694 xfs_bmap_punch_delalloc_range(
695 	struct xfs_inode	*ip,
696 	xfs_fileoff_t		start_fsb,
697 	xfs_fileoff_t		length)
698 {
699 	struct xfs_ifork	*ifp = &ip->i_df;
700 	xfs_fileoff_t		end_fsb = start_fsb + length;
701 	struct xfs_bmbt_irec	got, del;
702 	struct xfs_iext_cursor	icur;
703 	int			error = 0;
704 
705 	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
706 
707 	xfs_ilock(ip, XFS_ILOCK_EXCL);
708 	if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
709 		goto out_unlock;
710 
711 	while (got.br_startoff + got.br_blockcount > start_fsb) {
712 		del = got;
713 		xfs_trim_extent(&del, start_fsb, length);
714 
715 		/*
716 		 * A delete can push the cursor forward. Step back to the
717 		 * previous extent if this one is not delalloc or lies outside
718 		 * the target range.
719 		 */
720 		if (!del.br_blockcount ||
721 		    !isnullstartblock(del.br_startblock)) {
722 			if (!xfs_iext_prev_extent(ifp, &icur, &got))
723 				break;
724 			continue;
725 		}
726 
727 		error = xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur,
728 						  &got, &del);
729 		if (error || !xfs_iext_get_extent(ifp, &icur, &got))
730 			break;
731 	}
732 
733 out_unlock:
734 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
735 	return error;
736 }
737 
738 /*
739  * Test whether it is appropriate to check an inode for and free post EOF
740  * blocks. The 'force' parameter determines whether we should also consider
741  * regular files that are marked preallocated or append-only.
742  */
743 bool
744 xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
745 {
746 	/* prealloc/delalloc exists only on regular files */
747 	if (!S_ISREG(VFS_I(ip)->i_mode))
748 		return false;
749 
750 	/*
751 	 * Zero sized files with no cached pages and no delalloc blocks will not
752 	 * have speculative prealloc/delalloc blocks to remove.
753 	 */
754 	if (VFS_I(ip)->i_size == 0 &&
755 	    VFS_I(ip)->i_mapping->nrpages == 0 &&
756 	    ip->i_delayed_blks == 0)
757 		return false;
758 
759 	/* If we haven't read in the extent list, then don't do it now. */
760 	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
761 		return false;
762 
763 	/*
764 	 * Do not free real preallocated or append-only files unless the file
765 	 * has delalloc blocks and we are forced to remove them.
766 	 */
767 	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
768 		if (!force || ip->i_delayed_blks == 0)
769 			return false;
770 
771 	return true;
772 }
773 
774 /*
775  * This is called to free any blocks beyond eof. The caller must hold
776  * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
777  * reference to the inode.
778  */
779 int
780 xfs_free_eofblocks(
781 	struct xfs_inode	*ip)
782 {
783 	struct xfs_trans	*tp;
784 	int			error;
785 	xfs_fileoff_t		end_fsb;
786 	xfs_fileoff_t		last_fsb;
787 	xfs_filblks_t		map_len;
788 	int			nimaps;
789 	struct xfs_bmbt_irec	imap;
790 	struct xfs_mount	*mp = ip->i_mount;
791 
792 	/*
793 	 * Figure out if there are any blocks beyond the end
794 	 * of the file.  If not, then there is nothing to do.
795 	 */
796 	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
797 	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
798 	if (last_fsb <= end_fsb)
799 		return 0;
800 	map_len = last_fsb - end_fsb;
801 
802 	nimaps = 1;
803 	xfs_ilock(ip, XFS_ILOCK_SHARED);
804 	error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
805 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
806 
807 	/*
808 	 * If there are blocks after the end of file, truncate the file to its
809 	 * current size to free them up.
810 	 */
811 	if (!error && (nimaps != 0) &&
812 	    (imap.br_startblock != HOLESTARTBLOCK ||
813 	     ip->i_delayed_blks)) {
814 		/*
815 		 * Attach the dquots to the inode up front.
816 		 */
817 		error = xfs_qm_dqattach(ip);
818 		if (error)
819 			return error;
820 
821 		/* wait on dio to ensure i_size has settled */
822 		inode_dio_wait(VFS_I(ip));
823 
824 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0,
825 				&tp);
826 		if (error) {
827 			ASSERT(XFS_FORCED_SHUTDOWN(mp));
828 			return error;
829 		}
830 
831 		xfs_ilock(ip, XFS_ILOCK_EXCL);
832 		xfs_trans_ijoin(tp, ip, 0);
833 
834 		/*
835 		 * Do not update the on-disk file size.  If we update the
836 		 * on-disk file size and then the system crashes before the
837 		 * contents of the file are flushed to disk then the files
838 		 * may be full of holes (ie NULL files bug).
839 		 */
840 		error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
841 					XFS_ISIZE(ip), XFS_BMAPI_NODISCARD);
842 		if (error) {
843 			/*
844 			 * If we get an error at this point we simply don't
845 			 * bother truncating the file.
846 			 */
847 			xfs_trans_cancel(tp);
848 		} else {
849 			error = xfs_trans_commit(tp);
850 			if (!error)
851 				xfs_inode_clear_eofblocks_tag(ip);
852 		}
853 
854 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
855 	}
856 	return error;
857 }
858 
859 int
860 xfs_alloc_file_space(
861 	struct xfs_inode	*ip,
862 	xfs_off_t		offset,
863 	xfs_off_t		len,
864 	int			alloc_type)
865 {
866 	xfs_mount_t		*mp = ip->i_mount;
867 	xfs_off_t		count;
868 	xfs_filblks_t		allocated_fsb;
869 	xfs_filblks_t		allocatesize_fsb;
870 	xfs_extlen_t		extsz, temp;
871 	xfs_fileoff_t		startoffset_fsb;
872 	int			nimaps;
873 	int			quota_flag;
874 	int			rt;
875 	xfs_trans_t		*tp;
876 	xfs_bmbt_irec_t		imaps[1], *imapp;
877 	uint			qblocks, resblks, resrtextents;
878 	int			error;
879 
880 	trace_xfs_alloc_file_space(ip);
881 
882 	if (XFS_FORCED_SHUTDOWN(mp))
883 		return -EIO;
884 
885 	error = xfs_qm_dqattach(ip);
886 	if (error)
887 		return error;
888 
889 	if (len <= 0)
890 		return -EINVAL;
891 
892 	rt = XFS_IS_REALTIME_INODE(ip);
893 	extsz = xfs_get_extsz_hint(ip);
894 
895 	count = len;
896 	imapp = &imaps[0];
897 	nimaps = 1;
898 	startoffset_fsb	= XFS_B_TO_FSBT(mp, offset);
899 	allocatesize_fsb = XFS_B_TO_FSB(mp, count);
900 
901 	/*
902 	 * Allocate file space until done or until there is an error
903 	 */
904 	while (allocatesize_fsb && !error) {
905 		xfs_fileoff_t	s, e;
906 
907 		/*
908 		 * Determine space reservations for data/realtime.
909 		 */
910 		if (unlikely(extsz)) {
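			/* Round the range out to extent size hint granularity. */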
911 			s = startoffset_fsb;
912 			do_div(s, extsz);
913 			s *= extsz;
914 			e = startoffset_fsb + allocatesize_fsb;
915 			div_u64_rem(startoffset_fsb, extsz, &temp);
916 			if (temp)
917 				e += temp;
918 			div_u64_rem(e, extsz, &temp);
919 			if (temp)
920 				e += extsz - temp;
921 		} else {
922 			s = 0;
923 			e = allocatesize_fsb;
924 		}
925 
926 		/*
927 		 * The transaction reservation is limited to a 32-bit block
928 		 * count, hence we need to limit the number of blocks we are
929 		 * trying to reserve to avoid an overflow. We can't allocate
930 		 * more than @nimaps extents, and an extent is limited on disk
931 		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
932 		 */
933 		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
934 		if (unlikely(rt)) {
935 			resrtextents = qblocks = resblks;
936 			resrtextents /= mp->m_sb.sb_rextsize;
937 			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
938 			quota_flag = XFS_QMOPT_RES_RTBLKS;
939 		} else {
940 			resrtextents = 0;
941 			resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
942 			quota_flag = XFS_QMOPT_RES_REGBLKS;
943 		}
944 
945 		/*
946 		 * Allocate and setup the transaction.
947 		 */
948 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
949 				resrtextents, 0, &tp);
950 
951 		/*
952 		 * Check for running out of space
953 		 */
954 		if (error) {
955 			/*
956 			 * Free the transaction structure.
957 			 */
958 			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
959 			break;
960 		}
961 		xfs_ilock(ip, XFS_ILOCK_EXCL);
962 		error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
963 						      0, quota_flag);
964 		if (error)
965 			goto error1;
966 
967 		xfs_trans_ijoin(tp, ip, 0);
968 
969 		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
970 					allocatesize_fsb, alloc_type, resblks,
971 					imapp, &nimaps);
972 		if (error)
973 			goto error0;
974 
975 		/*
976 		 * Complete the transaction
977 		 */
978 		error = xfs_trans_commit(tp);
979 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
980 		if (error)
981 			break;
982 
983 		allocated_fsb = imapp->br_blockcount;
984 
985 		if (nimaps == 0) {
986 			error = -ENOSPC;
987 			break;
988 		}
989 
990 		startoffset_fsb += allocated_fsb;
991 		allocatesize_fsb -= allocated_fsb;
992 	}
993 
994 	return error;
995 
996 error0:	/* unlock inode, unreserve quota blocks, cancel trans */
997 	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
998 
999 error1:	/* Just cancel transaction */
1000 	xfs_trans_cancel(tp);
1001 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1002 	return error;
1003 }
1004 
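/*
 * Unmap part of a file range in a single transaction.  xfs_bunmapi() is only
 * asked to remove two extents per call, so the caller loops until *done is set.
 */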
1005 static int
1006 xfs_unmap_extent(
1007 	struct xfs_inode	*ip,
1008 	xfs_fileoff_t		startoffset_fsb,
1009 	xfs_filblks_t		len_fsb,
1010 	int			*done)
1011 {
1012 	struct xfs_mount	*mp = ip->i_mount;
1013 	struct xfs_trans	*tp;
1014 	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
1015 	int			error;
1016 
1017 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
1018 	if (error) {
1019 		ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
1020 		return error;
1021 	}
1022 
1023 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1024 	error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot, ip->i_gdquot,
1025 			ip->i_pdquot, resblks, 0, XFS_QMOPT_RES_REGBLKS);
1026 	if (error)
1027 		goto out_trans_cancel;
1028 
1029 	xfs_trans_ijoin(tp, ip, 0);
1030 
1031 	error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, done);
1032 	if (error)
1033 		goto out_trans_cancel;
1034 
1035 	error = xfs_trans_commit(tp);
1036 out_unlock:
1037 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1038 	return error;
1039 
1040 out_trans_cancel:
1041 	xfs_trans_cancel(tp);
1042 	goto out_unlock;
1043 }
1044 
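/*
 * For realtime files that cannot use unwritten extents, pull the unmap range
 * in so that it starts and ends on realtime extent boundaries.
 */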
1045 static int
1046 xfs_adjust_extent_unmap_boundaries(
1047 	struct xfs_inode	*ip,
1048 	xfs_fileoff_t		*startoffset_fsb,
1049 	xfs_fileoff_t		*endoffset_fsb)
1050 {
1051 	struct xfs_mount	*mp = ip->i_mount;
1052 	struct xfs_bmbt_irec	imap;
1053 	int			nimap, error;
1054 	xfs_extlen_t		mod = 0;
1055 
1056 	nimap = 1;
1057 	error = xfs_bmapi_read(ip, *startoffset_fsb, 1, &imap, &nimap, 0);
1058 	if (error)
1059 		return error;
1060 
1061 	if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
1062 		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
1063 		div_u64_rem(imap.br_startblock, mp->m_sb.sb_rextsize, &mod);
1064 		if (mod)
1065 			*startoffset_fsb += mp->m_sb.sb_rextsize - mod;
1066 	}
1067 
1068 	nimap = 1;
1069 	error = xfs_bmapi_read(ip, *endoffset_fsb - 1, 1, &imap, &nimap, 0);
1070 	if (error)
1071 		return error;
1072 
1073 	if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
1074 		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
1075 		mod++;
1076 		if (mod && mod != mp->m_sb.sb_rextsize)
1077 			*endoffset_fsb -= mod;
1078 	}
1079 
1080 	return 0;
1081 }
1082 
1083 int
1084 xfs_flush_unmap_range(
1085 	struct xfs_inode	*ip,
1086 	xfs_off_t		offset,
1087 	xfs_off_t		len)
1088 {
1089 	struct xfs_mount	*mp = ip->i_mount;
1090 	struct inode		*inode = VFS_I(ip);
1091 	xfs_off_t		rounding, start, end;
1092 	int			error;
1093 
1094 	/* wait for the completion of any pending DIOs */
1095 	inode_dio_wait(inode);
1096 
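	/*
	 * Round the range out to the larger of the block size and the page
	 * size so partially cached pages and blocks are written back and
	 * invalidated in full.
	 */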
1097 	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
1098 	start = round_down(offset, rounding);
1099 	end = round_up(offset + len, rounding) - 1;
1100 
1101 	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
1102 	if (error)
1103 		return error;
1104 	truncate_pagecache_range(inode, start, end);
1105 	return 0;
1106 }
1107 
1108 int
1109 xfs_free_file_space(
1110 	struct xfs_inode	*ip,
1111 	xfs_off_t		offset,
1112 	xfs_off_t		len)
1113 {
1114 	struct xfs_mount	*mp = ip->i_mount;
1115 	xfs_fileoff_t		startoffset_fsb;
1116 	xfs_fileoff_t		endoffset_fsb;
1117 	int			done = 0, error;
1118 
1119 	trace_xfs_free_file_space(ip);
1120 
1121 	error = xfs_qm_dqattach(ip);
1122 	if (error)
1123 		return error;
1124 
1125 	if (len <= 0)	/* if nothing being freed */
1126 		return 0;
1127 
1128 	error = xfs_flush_unmap_range(ip, offset, len);
1129 	if (error)
1130 		return error;
1131 
1132 	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
1133 	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
1134 
1135 	/*
1136 	 * Need to zero the stuff we're not freeing, on disk.  If it's a RT file
1137 	 * and we can't use unwritten extents then we actually need to zero the
1138 	 * whole extent, otherwise we just need to take care of the block
1139 	 * boundaries, and xfs_bunmapi will handle the rest.
1140 	 */
1141 	if (XFS_IS_REALTIME_INODE(ip) &&
1142 	    !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
1143 		error = xfs_adjust_extent_unmap_boundaries(ip, &startoffset_fsb,
1144 				&endoffset_fsb);
1145 		if (error)
1146 			return error;
1147 	}
1148 
1149 	if (endoffset_fsb > startoffset_fsb) {
1150 		while (!done) {
1151 			error = xfs_unmap_extent(ip, startoffset_fsb,
1152 					endoffset_fsb - startoffset_fsb, &done);
1153 			if (error)
1154 				return error;
1155 		}
1156 	}
1157 
1158 	/*
1159 	 * Now that we've unmapped all full blocks we'll have to zero out any
1160 	 * partial block at the beginning and/or end.  iomap_zero_range is smart
1161 	 * enough to skip any holes, including those we just created, but we
1162 	 * must take care not to zero beyond EOF and enlarge i_size.
1163 	 */
1164 	if (offset >= XFS_ISIZE(ip))
1165 		return 0;
1166 	if (offset + len > XFS_ISIZE(ip))
1167 		len = XFS_ISIZE(ip) - offset;
1168 	error = iomap_zero_range(VFS_I(ip), offset, len, NULL, &xfs_iomap_ops);
1169 	if (error)
1170 		return error;
1171 
1172 	/*
1173 	 * If we zeroed right up to EOF and EOF straddles a page boundary we
1174 	 * must make sure that the post-EOF area is also zeroed because the
1175 	 * page could be mmap'd and iomap_zero_range doesn't do that for us.
1176 	 * Writeback of the eof page will do this, albeit clumsily.
1177 	 */
1178 	if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
1179 		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
1180 				round_down(offset + len, PAGE_SIZE), LLONG_MAX);
1181 	}
1182 
1183 	return error;
1184 }
1185 
1186 /*
1187  * Preallocate and zero a range of a file. This mechanism has the allocation
1188  * semantics of fallocate and in addition converts data in the range to zeroes.
1189  */
1190 int
1191 xfs_zero_file_space(
1192 	struct xfs_inode	*ip,
1193 	xfs_off_t		offset,
1194 	xfs_off_t		len)
1195 {
1196 	struct xfs_mount	*mp = ip->i_mount;
1197 	uint			blksize;
1198 	int			error;
1199 
1200 	trace_xfs_zero_file_space(ip);
1201 
1202 	blksize = 1 << mp->m_sb.sb_blocklog;
1203 
1204 	/*
1205 	 * Punch a hole and prealloc the range. We use hole punch rather than
1206 	 * unwritten extent conversion for two reasons:
1207 	 *
1208 	 * 1.) Hole punch handles partial block zeroing for us.
1209 	 *
1210 	 * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
1211 	 * by virtue of the hole punch.
1212 	 */
1213 	error = xfs_free_file_space(ip, offset, len);
1214 	if (error)
1215 		goto out;
1216 
1217 	error = xfs_alloc_file_space(ip, round_down(offset, blksize),
1218 				     round_up(offset + len, blksize) -
1219 				     round_down(offset, blksize),
1220 				     XFS_BMAPI_PREALLOC);
1221 out:
1222 	return error;
1223 
1224 }
1225 
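/*
 * Prepare a file for an extent shift: trim post-EOF preallocations, flush and
 * invalidate the range being shifted, and cancel any stale COW fork extents.
 */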
1226 static int
1227 xfs_prepare_shift(
1228 	struct xfs_inode	*ip,
1229 	loff_t			offset)
1230 {
1231 	int			error;
1232 
1233 	/*
1234 	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
1235 	 * into the accessible region of the file.
1236 	 */
1237 	if (xfs_can_free_eofblocks(ip, true)) {
1238 		error = xfs_free_eofblocks(ip);
1239 		if (error)
1240 			return error;
1241 	}
1242 
1243 	/*
1244 	 * Writeback and invalidate cache for the remainder of the file as we're
1245 	 * about to shift down every extent from offset to EOF.
1246 	 */
1247 	error = xfs_flush_unmap_range(ip, offset, XFS_ISIZE(ip));
1248 	if (error)
1249 		return error;
1250 
1251 	/*
1252 	 * Clean out anything hanging around in the cow fork now that
1253 	 * we've flushed all the dirty data out to disk to avoid having
1254 	 * CoW extents at the wrong offsets.
1255 	 */
1256 	if (xfs_inode_has_cow_data(ip)) {
1257 		error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
1258 				true);
1259 		if (error)
1260 			return error;
1261 	}
1262 
1263 	return 0;
1264 }
1265 
1266 /*
1267  * xfs_collapse_file_space()
1268  *	This routine frees disk space and shifts extents for the given file.
1269  *	The first thing we do is free the data blocks in the specified range
1270  *	by calling xfs_free_file_space(), which also syncs dirty data and
1271  *	invalidates the page cache over the region the collapse range is
1272  *	working on.  Extent records are then shifted left to cover the hole.
1273  * RETURNS:
1274  *	0 on success
1275  *	errno on error
1276  *
1277  */
1278 int
1279 xfs_collapse_file_space(
1280 	struct xfs_inode	*ip,
1281 	xfs_off_t		offset,
1282 	xfs_off_t		len)
1283 {
1284 	struct xfs_mount	*mp = ip->i_mount;
1285 	struct xfs_trans	*tp;
1286 	int			error;
1287 	xfs_fileoff_t		next_fsb = XFS_B_TO_FSB(mp, offset + len);
1288 	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
1289 	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
1290 	bool			done = false;
1291 
1292 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1293 	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
1294 
1295 	trace_xfs_collapse_file_space(ip);
1296 
1297 	error = xfs_free_file_space(ip, offset, len);
1298 	if (error)
1299 		return error;
1300 
1301 	error = xfs_prepare_shift(ip, offset);
1302 	if (error)
1303 		return error;
1304 
1305 	while (!error && !done) {
1306 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0,
1307 					&tp);
1308 		if (error)
1309 			break;
1310 
1311 		xfs_ilock(ip, XFS_ILOCK_EXCL);
1312 		error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
1313 				ip->i_gdquot, ip->i_pdquot, resblks, 0,
1314 				XFS_QMOPT_RES_REGBLKS);
1315 		if (error)
1316 			goto out_trans_cancel;
1317 		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1318 
1319 		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
1320 				&done);
1321 		if (error)
1322 			goto out_trans_cancel;
1323 
1324 		error = xfs_trans_commit(tp);
1325 	}
1326 
1327 	return error;
1328 
1329 out_trans_cancel:
1330 	xfs_trans_cancel(tp);
1331 	return error;
1332 }
1333 
1334 /*
1335  * xfs_insert_file_space()
1336  *	This routine creates hole space by shifting extents for the given file.
1337  *	The first thing we do is sync dirty data and invalidate the page cache
1338  *	over the region the insert range is working on.  We then split the
1339  *	extent at the given offset by calling xfs_bmap_split_extent, and shift
1340  *	all extent records lying between [offset, last allocated extent] to
1341  *	the right to make room for the hole.
1342  * RETURNS:
1343  *	0 on success
1344  *	errno on error
1345  */
1346 int
1347 xfs_insert_file_space(
1348 	struct xfs_inode	*ip,
1349 	loff_t			offset,
1350 	loff_t			len)
1351 {
1352 	struct xfs_mount	*mp = ip->i_mount;
1353 	struct xfs_trans	*tp;
1354 	int			error;
1355 	xfs_fileoff_t		stop_fsb = XFS_B_TO_FSB(mp, offset);
1356 	xfs_fileoff_t		next_fsb = NULLFSBLOCK;
1357 	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
1358 	bool			done = false;
1359 
1360 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1361 	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
1362 
1363 	trace_xfs_insert_file_space(ip);
1364 
1365 	error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
1366 	if (error)
1367 		return error;
1368 
1369 	error = xfs_prepare_shift(ip, offset);
1370 	if (error)
1371 		return error;
1372 
1373 	/*
1374 	 * The extent shifting code works on extent granularity. So, if stop_fsb
1375 	 * is not the starting block of extent, we need to split the extent at
1376 	 * stop_fsb.
1377 	 */
1378 	error = xfs_bmap_split_extent(ip, stop_fsb);
1379 	if (error)
1380 		return error;
1381 
1382 	while (!error && !done) {
1383 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0,
1384 					&tp);
1385 		if (error)
1386 			break;
1387 
1388 		xfs_ilock(ip, XFS_ILOCK_EXCL);
1389 		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1390 		error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
1391 				&done, stop_fsb);
1392 		if (error)
1393 			goto out_trans_cancel;
1394 
1395 		error = xfs_trans_commit(tp);
1396 	}
1397 
1398 	return error;
1399 
1400 out_trans_cancel:
1401 	xfs_trans_cancel(tp);
1402 	return error;
1403 }
1404 
1405 /*
1406  * We need to check that the format of the data fork in the temporary inode is
1407  * valid for the target inode before doing the swap. This is not a problem with
1408  * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
1409  * data fork depending on the space the attribute fork is taking so we can get
1410  * invalid formats on the target inode.
1411  *
1412  * E.g. target has space for 7 extents in extent format, temp inode only has
1413  * space for 6.  If we defragment down to 7 extents, then the tmp format is a
1414  * btree, but when swapped it needs to be in extent format. Hence we can't just
1415  * blindly swap data forks on attr2 filesystems.
1416  *
1417  * Note that we check the swap in both directions so that we don't end up with
1418  * a corrupt temporary inode, either.
1419  *
1420  * Note that fixing the way xfs_fsr sets up the attribute fork in the source
1421  * inode will prevent this situation from occurring, so all we do here is
1422  * reject and log the attempt. Basically we are putting the responsibility on
1423  * userspace to get this right.
1424  */
1425 static int
1426 xfs_swap_extents_check_format(
1427 	struct xfs_inode	*ip,	/* target inode */
1428 	struct xfs_inode	*tip)	/* tmp inode */
1429 {
1430 
1431 	/* Should never get a local format */
1432 	if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
1433 	    tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
1434 		return -EINVAL;
1435 
1436 	/*
1437 	 * if the target inode has fewer extents than the temporary inode then
1438 	 * why did userspace call us?
1439 	 */
1440 	if (ip->i_d.di_nextents < tip->i_d.di_nextents)
1441 		return -EINVAL;
1442 
1443 	/*
1444 	 * If we have to use the (expensive) rmap swap method, we can
1445 	 * handle any number of extents and any format.
1446 	 */
1447 	if (xfs_sb_version_hasrmapbt(&ip->i_mount->m_sb))
1448 		return 0;
1449 
1450 	/*
1451 	 * if the target inode is in extent form and the temp inode is in btree
1452 	 * form then we will end up with the target inode in the wrong format
1453 	 * as we already know there are fewer extents in the temp inode.
1454 	 */
1455 	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1456 	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
1457 		return -EINVAL;
1458 
1459 	/* Check temp in extent form to max in target */
1460 	if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1461 	    XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
1462 			XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1463 		return -EINVAL;
1464 
1465 	/* Check target in extent form to max in temp */
1466 	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1467 	    XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
1468 			XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1469 		return -EINVAL;
1470 
1471 	/*
1472 	 * If we are in a btree format, check that the temp root block will fit
1473 	 * in the target and that it has enough extents to be in btree format
1474 	 * in the target.
1475 	 *
1476 	 * Note that we have to be careful to allow btree->extent conversions
1477 	 * (a common defrag case) which will occur when the temp inode is in
1478 	 * extent format...
1479 	 */
1480 	if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1481 		if (XFS_IFORK_Q(ip) &&
1482 		    XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
1483 			return -EINVAL;
1484 		if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
1485 		    XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1486 			return -EINVAL;
1487 	}
1488 
1489 	/* Reciprocal target->temp btree format checks */
1490 	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1491 		if (XFS_IFORK_Q(tip) &&
1492 		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
1493 			return -EINVAL;
1494 		if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
1495 		    XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1496 			return -EINVAL;
1497 	}
1498 
1499 	return 0;
1500 }
1501 
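/*
 * Flush all dirty data and invalidate the page cache for an inode before an
 * extent swap; any pages left in the mapping mean the swap cannot proceed.
 */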
1502 static int
1503 xfs_swap_extent_flush(
1504 	struct xfs_inode	*ip)
1505 {
1506 	int	error;
1507 
1508 	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
1509 	if (error)
1510 		return error;
1511 	truncate_pagecache_range(VFS_I(ip), 0, -1);
1512 
1513 	/* Verify O_DIRECT for ftmp */
1514 	if (VFS_I(ip)->i_mapping->nrpages)
1515 		return -EINVAL;
1516 	return 0;
1517 }
1518 
1519 /*
1520  * Move extents from one file to another, when rmap is enabled.
1521  */
1522 STATIC int
1523 xfs_swap_extent_rmap(
1524 	struct xfs_trans		**tpp,
1525 	struct xfs_inode		*ip,
1526 	struct xfs_inode		*tip)
1527 {
1528 	struct xfs_trans		*tp = *tpp;
1529 	struct xfs_bmbt_irec		irec;
1530 	struct xfs_bmbt_irec		uirec;
1531 	struct xfs_bmbt_irec		tirec;
1532 	xfs_fileoff_t			offset_fsb;
1533 	xfs_fileoff_t			end_fsb;
1534 	xfs_filblks_t			count_fsb;
1535 	int				error;
1536 	xfs_filblks_t			ilen;
1537 	xfs_filblks_t			rlen;
1538 	int				nimaps;
1539 	uint64_t			tip_flags2;
1540 
1541 	/*
1542 	 * If the source file has shared blocks, we must flag the donor
1543 	 * file as having shared blocks so that we get the shared-block
1544 	 * rmap functions when we go to fix up the rmaps.  The flags
1545 	 * will be switched for real later.
1546 	 */
1547 	tip_flags2 = tip->i_d.di_flags2;
1548 	if (ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)
1549 		tip->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;
1550 
1551 	offset_fsb = 0;
1552 	end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
1553 	count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
1554 
1555 	while (count_fsb) {
1556 		/* Read extent from the donor file */
1557 		nimaps = 1;
1558 		error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
1559 				&nimaps, 0);
1560 		if (error)
1561 			goto out;
1562 		ASSERT(nimaps == 1);
1563 		ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);
1564 
1565 		trace_xfs_swap_extent_rmap_remap(tip, &tirec);
1566 		ilen = tirec.br_blockcount;
1567 
1568 		/* Unmap the old blocks in the source file. */
1569 		while (tirec.br_blockcount) {
1570 			ASSERT(tp->t_firstblock == NULLFSBLOCK);
1571 			trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);
1572 
1573 			/* Read extent from the source file */
1574 			nimaps = 1;
1575 			error = xfs_bmapi_read(ip, tirec.br_startoff,
1576 					tirec.br_blockcount, &irec,
1577 					&nimaps, 0);
1578 			if (error)
1579 				goto out;
1580 			ASSERT(nimaps == 1);
1581 			ASSERT(tirec.br_startoff == irec.br_startoff);
1582 			trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
1583 
1584 			/* Trim the extent. */
1585 			uirec = tirec;
1586 			uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
1587 					tirec.br_blockcount,
1588 					irec.br_blockcount);
1589 			trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);
1590 
1591 			/* Remove the mapping from the donor file. */
1592 			error = xfs_bmap_unmap_extent(tp, tip, &uirec);
1593 			if (error)
1594 				goto out;
1595 
1596 			/* Remove the mapping from the source file. */
1597 			error = xfs_bmap_unmap_extent(tp, ip, &irec);
1598 			if (error)
1599 				goto out;
1600 
1601 			/* Map the donor file's blocks into the source file. */
1602 			error = xfs_bmap_map_extent(tp, ip, &uirec);
1603 			if (error)
1604 				goto out;
1605 
1606 			/* Map the source file's blocks into the donor file. */
1607 			error = xfs_bmap_map_extent(tp, tip, &irec);
1608 			if (error)
1609 				goto out;
1610 
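			/*
			 * Finishing the deferred ops may roll the transaction,
			 * so refresh the local pointer from *tpp afterwards.
			 */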
1611 			error = xfs_defer_finish(tpp);
1612 			tp = *tpp;
1613 			if (error)
1614 				goto out;
1615 
1616 			tirec.br_startoff += rlen;
1617 			if (tirec.br_startblock != HOLESTARTBLOCK &&
1618 			    tirec.br_startblock != DELAYSTARTBLOCK)
1619 				tirec.br_startblock += rlen;
1620 			tirec.br_blockcount -= rlen;
1621 		}
1622 
1623 		/* Roll on... */
1624 		count_fsb -= ilen;
1625 		offset_fsb += ilen;
1626 	}
1627 
1628 	tip->i_d.di_flags2 = tip_flags2;
1629 	return 0;
1630 
1631 out:
1632 	trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
1633 	tip->i_d.di_flags2 = tip_flags2;
1634 	return error;
1635 }
1636 
1637 /* Swap the extents of two files by swapping data forks. */
1638 STATIC int
1639 xfs_swap_extent_forks(
1640 	struct xfs_trans	*tp,
1641 	struct xfs_inode	*ip,
1642 	struct xfs_inode	*tip,
1643 	int			*src_log_flags,
1644 	int			*target_log_flags)
1645 {
1646 	xfs_filblks_t		aforkblks = 0;
1647 	xfs_filblks_t		taforkblks = 0;
1648 	xfs_extnum_t		junk;
1649 	uint64_t		tmp;
1650 	int			error;
1651 
1652 	/*
1653 	 * Count the number of extended attribute blocks
1654 	 */
1655 	if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
1656 	     (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
1657 		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
1658 				&aforkblks);
1659 		if (error)
1660 			return error;
1661 	}
1662 	if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
1663 	     (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
1664 		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
1665 				&taforkblks);
1666 		if (error)
1667 			return error;
1668 	}
1669 
1670 	/*
1671 	 * Btree format (v3) inodes have the inode number stamped in the bmbt
1672 	 * block headers. We can't start changing the bmbt blocks until the
1673 	 * inode owner change is logged so recovery does the right thing in the
1674 	 * event of a crash. Set the owner change log flags now and leave the
1675 	 * bmbt scan as the last step.
1676 	 */
1677 	if (ip->i_d.di_version == 3 &&
1678 	    ip->i_d.di_format == XFS_DINODE_FMT_BTREE)
1679 		(*target_log_flags) |= XFS_ILOG_DOWNER;
1680 	if (tip->i_d.di_version == 3 &&
1681 	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
1682 		(*src_log_flags) |= XFS_ILOG_DOWNER;
1683 
1684 	/*
1685 	 * Swap the data forks of the inodes
1686 	 */
1687 	swap(ip->i_df, tip->i_df);
1688 
1689 	/*
1690 	 * Fix the on-disk inode values
1691 	 */
1692 	tmp = (uint64_t)ip->i_d.di_nblocks;
1693 	ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
1694 	tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;
1695 
1696 	swap(ip->i_d.di_nextents, tip->i_d.di_nextents);
1697 	swap(ip->i_d.di_format, tip->i_d.di_format);
1698 
1699 	/*
1700 	 * The extents in the source inode could still contain speculative
1701 	 * preallocation beyond EOF (e.g. the file is open but not modified
1702 	 * while defrag is in progress). In that case, we need to copy over the
1703 	 * number of delalloc blocks the data fork in the source inode is
1704 	 * tracking beyond EOF so that when the fork is truncated away when the
1705 	 * temporary inode is unlinked we don't underrun the i_delayed_blks
1706 	 * counter on that inode.
1707 	 */
1708 	ASSERT(tip->i_delayed_blks == 0);
1709 	tip->i_delayed_blks = ip->i_delayed_blks;
1710 	ip->i_delayed_blks = 0;
1711 
1712 	switch (ip->i_d.di_format) {
1713 	case XFS_DINODE_FMT_EXTENTS:
1714 		(*src_log_flags) |= XFS_ILOG_DEXT;
1715 		break;
1716 	case XFS_DINODE_FMT_BTREE:
1717 		ASSERT(ip->i_d.di_version < 3 ||
1718 		       (*src_log_flags & XFS_ILOG_DOWNER));
1719 		(*src_log_flags) |= XFS_ILOG_DBROOT;
1720 		break;
1721 	}
1722 
1723 	switch (tip->i_d.di_format) {
1724 	case XFS_DINODE_FMT_EXTENTS:
1725 		(*target_log_flags) |= XFS_ILOG_DEXT;
1726 		break;
1727 	case XFS_DINODE_FMT_BTREE:
1728 		(*target_log_flags) |= XFS_ILOG_DBROOT;
1729 		ASSERT(tip->i_d.di_version < 3 ||
1730 		       (*target_log_flags & XFS_ILOG_DOWNER));
1731 		break;
1732 	}
1733 
1734 	return 0;
1735 }
1736 
1737 /*
1738  * Fix up the owners of the bmbt blocks to refer to the current inode. The
1739  * change owner scan attempts to order all modified buffers in the current
1740  * transaction. In the event of ordered buffer failure, the offending buffer is
1741  * physically logged as a fallback and the scan returns -EAGAIN. We must roll
1742  * the transaction in this case to replenish the fallback log reservation and
1743  * restart the scan. This process repeats until the scan completes.
1744  */
1745 static int
1746 xfs_swap_change_owner(
1747 	struct xfs_trans	**tpp,
1748 	struct xfs_inode	*ip,
1749 	struct xfs_inode	*tmpip)
1750 {
1751 	int			error;
1752 	struct xfs_trans	*tp = *tpp;
1753 
1754 	do {
1755 		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
1756 					      NULL);
1757 		/* success or fatal error */
1758 		if (error != -EAGAIN)
1759 			break;
1760 
1761 		error = xfs_trans_roll(tpp);
1762 		if (error)
1763 			break;
1764 		tp = *tpp;
1765 
1766 		/*
1767 		 * Redirty both inodes so they can relog and keep the log tail
1768 		 * moving forward.
1769 		 */
1770 		xfs_trans_ijoin(tp, ip, 0);
1771 		xfs_trans_ijoin(tp, tmpip, 0);
1772 		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1773 		xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
1774 	} while (true);
1775 
1776 	return error;
1777 }
1778 
1779 int
1780 xfs_swap_extents(
1781 	struct xfs_inode	*ip,	/* target inode */
1782 	struct xfs_inode	*tip,	/* tmp inode */
1783 	struct xfs_swapext	*sxp)
1784 {
1785 	struct xfs_mount	*mp = ip->i_mount;
1786 	struct xfs_trans	*tp;
1787 	struct xfs_bstat	*sbp = &sxp->sx_stat;
1788 	int			src_log_flags, target_log_flags;
1789 	int			error = 0;
1790 	int			lock_flags;
1791 	uint64_t		f;
1792 	int			resblks = 0;
1793 
1794 	/*
1795 	 * Lock the inodes against other IO, page faults and truncate to
1796 	 * begin with.  Then we can safely ensure the inodes are flushed and
1797 	 * have no page cache. Once we have done this we can take the ilocks and
1798 	 * do the rest of the checks.
1799 	 */
1800 	lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
1801 	lock_flags = XFS_MMAPLOCK_EXCL;
1802 	xfs_lock_two_inodes(ip, XFS_MMAPLOCK_EXCL, tip, XFS_MMAPLOCK_EXCL);
1803 
1804 	/* Verify that both files are the same type */
1805 	if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
1806 		error = -EINVAL;
1807 		goto out_unlock;
1808 	}
1809 
1810 	/* Verify both files are either real-time or non-realtime */
1811 	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
1812 		error = -EINVAL;
1813 		goto out_unlock;
1814 	}
1815 
1816 	error = xfs_swap_extent_flush(ip);
1817 	if (error)
1818 		goto out_unlock;
1819 	error = xfs_swap_extent_flush(tip);
1820 	if (error)
1821 		goto out_unlock;
1822 
1823 	if (xfs_inode_has_cow_data(tip)) {
1824 		error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true);
1825 		if (error)
1826 			goto out_unlock;
1827 	}
1828 
1829 	/*
1830 	 * Extent "swapping" with rmap requires a permanent reservation and
1831 	 * a block reservation because it's really just a remap operation
1832 	 * performed with log redo items!
1833 	 */
1834 	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
1835 		int		w	= XFS_DATA_FORK;
1836 		uint32_t	ipnext	= XFS_IFORK_NEXTENTS(ip, w);
1837 		uint32_t	tipnext	= XFS_IFORK_NEXTENTS(tip, w);
1838 
1839 		/*
1840 		 * Conceptually this shouldn't affect the shape of either bmbt,
1841 		 * but since we atomically move extents one by one, we reserve
1842 		 * enough space to rebuild both trees.
1843 		 */
1844 		resblks = XFS_SWAP_RMAP_SPACE_RES(mp, ipnext, w);
1845 		resblks += XFS_SWAP_RMAP_SPACE_RES(mp, tipnext, w);
1846 
1847 		/*
1848 		 * Handle the corner case where either inode might straddle the
1849 		 * btree format boundary. If so, the inode could bounce between
1850 		 * btree <-> extent format on unmap -> remap cycles, freeing and
1851 		 * allocating a bmapbt block each time.
1852 		 */
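		/*
		 * For reference, XFS_IFORK_MAXEXT() is the number of extent
		 * records that fit in the fork's inline area in the inode, so
		 * "maxext + 1" means the fork is exactly one extent over that
		 * limit and carries a minimal bmbt that can be freed and
		 * rebuilt as extents are unmapped and remapped.
		 */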
1853 		if (ipnext == (XFS_IFORK_MAXEXT(ip, w) + 1))
1854 			resblks += XFS_IFORK_MAXEXT(ip, w);
1855 		if (tipnext == (XFS_IFORK_MAXEXT(tip, w) + 1))
1856 			resblks += XFS_IFORK_MAXEXT(tip, w);
1857 	}
1858 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
1859 	if (error)
1860 		goto out_unlock;
1861 
1862 	/*
1863 	 * Lock and join the inodes to the transaction so that transaction commit
1864 	 * or cancel will unlock the inodes from this point onwards.
1865 	 */
1866 	xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
1867 	lock_flags |= XFS_ILOCK_EXCL;
1868 	xfs_trans_ijoin(tp, ip, 0);
1869 	xfs_trans_ijoin(tp, tip, 0);
1870 
1871 
1872 	/* Verify all data are being swapped */
1873 	if (sxp->sx_offset != 0 ||
1874 	    sxp->sx_length != ip->i_d.di_size ||
1875 	    sxp->sx_length != tip->i_d.di_size) {
1876 		error = -EFAULT;
1877 		goto out_trans_cancel;
1878 	}
1879 
1880 	trace_xfs_swap_extent_before(ip, 0);
1881 	trace_xfs_swap_extent_before(tip, 1);
1882 
1883 	/* check inode formats now that data is flushed */
1884 	error = xfs_swap_extents_check_format(ip, tip);
1885 	if (error) {
1886 		xfs_notice(mp,
1887 		    "%s: inode 0x%llx format is incompatible for exchanging.",
1888 				__func__, ip->i_ino);
1889 		goto out_trans_cancel;
1890 	}
1891 
1892 	/*
1893 	 * Compare the current change & modify times with those
1894 	 * passed in.  If they differ, we abort this swap.
1895 	 * This is the mechanism used to assure the calling
1896 	 * process that the file was not changed out from
1897 	 * under it.
1898 	 */
1899 	if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
1900 	    (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
1901 	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
1902 	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
1903 		error = -EBUSY;
1904 		goto out_trans_cancel;
1905 	}
1906 
1907 	/*
1908 	 * Note the trickiness in setting the log flags - we set the owner log
1909 	 * flag on the opposite inode (i.e. the inode we are setting the new
1910 	 * owner to be) because once we swap the forks and log that, log
1911 	 * recovery is going to see the fork as owned by the swapped inode,
1912 	 * not the pre-swapped inodes.
1913 	 */
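	/*
	 * Concretely: if ip's data fork was in btree format before the swap,
	 * XFS_ILOG_DOWNER ends up in target_log_flags, because after the fork
	 * swap that btree belongs to tip and its block headers must be
	 * rewritten with tip->i_ino by the owner-change scan further down.
	 */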
1914 	src_log_flags = XFS_ILOG_CORE;
1915 	target_log_flags = XFS_ILOG_CORE;
1916 
1917 	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
1918 		error = xfs_swap_extent_rmap(&tp, ip, tip);
1919 	else
1920 		error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
1921 				&target_log_flags);
1922 	if (error)
1923 		goto out_trans_cancel;
1924 
1925 	/* Do we have to swap reflink flags? */
1926 	if ((ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK) ^
1927 	    (tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)) {
1928 		f = ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
1929 		ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
1930 		ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
1931 		tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
1932 		tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK;
1933 	}
1934 
1935 	/* Swap the cow forks. */
1936 	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
1937 		ASSERT(ip->i_cformat == XFS_DINODE_FMT_EXTENTS);
1938 		ASSERT(tip->i_cformat == XFS_DINODE_FMT_EXTENTS);
1939 
1940 		swap(ip->i_cnextents, tip->i_cnextents);
1941 		swap(ip->i_cowfp, tip->i_cowfp);
1942 
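		/*
		 * The cowblocks tag drives background reclaim of COW fork
		 * preallocation, so once the COW forks have moved, retag each
		 * inode according to whether it now holds COW extents.
		 */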
1943 		if (ip->i_cowfp && ip->i_cowfp->if_bytes)
1944 			xfs_inode_set_cowblocks_tag(ip);
1945 		else
1946 			xfs_inode_clear_cowblocks_tag(ip);
1947 		if (tip->i_cowfp && tip->i_cowfp->if_bytes)
1948 			xfs_inode_set_cowblocks_tag(tip);
1949 		else
1950 			xfs_inode_clear_cowblocks_tag(tip);
1951 	}
1952 
1953 	xfs_trans_log_inode(tp, ip,  src_log_flags);
1954 	xfs_trans_log_inode(tp, tip, target_log_flags);
1955 
1956 	/*
1957 	 * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
1958 	 * have inode number owner values in the bmbt blocks that still refer to
1959 	 * the old inode. Scan each bmbt to fix up the owner values with the
1960 	 * inode number of the current inode.
1961 	 */
1962 	if (src_log_flags & XFS_ILOG_DOWNER) {
1963 		error = xfs_swap_change_owner(&tp, ip, tip);
1964 		if (error)
1965 			goto out_trans_cancel;
1966 	}
1967 	if (target_log_flags & XFS_ILOG_DOWNER) {
1968 		error = xfs_swap_change_owner(&tp, tip, ip);
1969 		if (error)
1970 			goto out_trans_cancel;
1971 	}
1972 
1973 	/*
1974 	 * If this is a synchronous mount, make sure that the
1975 	 * transaction goes to disk before returning to the user.
1976 	 */
1977 	if (mp->m_flags & XFS_MOUNT_WSYNC)
1978 		xfs_trans_set_sync(tp);
1979 
1980 	error = xfs_trans_commit(tp);
1981 
1982 	trace_xfs_swap_extent_after(ip, 0);
1983 	trace_xfs_swap_extent_after(tip, 1);
1984 
1985 out_unlock:
1986 	xfs_iunlock(ip, lock_flags);
1987 	xfs_iunlock(tip, lock_flags);
1988 	unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
1989 	return error;
1990 
1991 out_trans_cancel:
1992 	xfs_trans_cancel(tp);
1993 	goto out_unlock;
1994 }
1995
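
/*
 * Illustrative userspace sketch only, not part of this file: how a tool in
 * the style of xfs_fsr drives xfs_swap_extents() through the XFS_IOC_SWAPEXT
 * ioctl. It assumes the xfsprogs headers are installed for struct xfs_swapext
 * and the ioctl number; swap_file_extents() is a hypothetical helper, not an
 * xfsprogs API. Obtaining the xfs_bstat of the target (normally done before
 * copying the data) is elided here, but it must describe the target from
 * before the copy or the ctime/mtime check above fails with -EBUSY.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <xfs/xfs.h>

static int swap_file_extents(int target_fd, int tmp_fd, off_t size,
			     const struct xfs_bstat *target_stat)
{
	struct xfs_swapext	sx;

	memset(&sx, 0, sizeof(sx));
	sx.sx_version = XFS_SX_VERSION;
	sx.sx_fdtarget = target_fd;	/* file being defragmented */
	sx.sx_fdtmp = tmp_fd;		/* donor file with the new layout */
	sx.sx_offset = 0;		/* only whole-file swaps are allowed */
	sx.sx_length = size;		/* must equal both files' di_size */
	sx.sx_stat = *target_stat;	/* bulkstat taken before the copy */

	/*
	 * Issued here on the target fd; the kernel resolves both inodes from
	 * sx_fdtarget and sx_fdtmp in the structure.
	 */
	return ioctl(target_fd, XFS_IOC_SWAPEXT, &sx);
}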