// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_icache.h"

/*
 * Return stat information for one inode.
 * Return 0 if ok, else errno.
 */
int
xfs_bulkstat_one_int(
	struct xfs_mount	*mp,		/* mount point for filesystem */
	xfs_ino_t		ino,		/* inode to get data for */
	void __user		*buffer,	/* buffer to place output in */
	int			ubsize,		/* size of buffer */
	bulkstat_one_fmt_pf	formatter,	/* formatter, copy to user */
	int			*ubused,	/* bytes used by me */
	int			*stat)		/* BULKSTAT_RV_... */
{
	struct xfs_icdinode	*dic;		/* dinode core info pointer */
	struct xfs_inode	*ip;		/* incore inode pointer */
	struct inode		*inode;
	struct xfs_bstat	*buf;		/* return buffer */
	int			error = 0;	/* error value */

	*stat = BULKSTAT_RV_NOTHING;

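	/* Internal filesystem metadata inodes are never exposed here. */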
	if (!buffer || xfs_internal_inum(mp, ino))
		return -EINVAL;

	buf = kmem_zalloc(sizeof(*buf), KM_SLEEP | KM_MAYFAIL);
	if (!buf)
		return -ENOMEM;

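	/*
	 * The inode number came from userspace, so the lookup must be
	 * untrusted (validated against the inobt), and there is no point
	 * keeping the inode cached once we are done with it.
	 */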
	error = xfs_iget(mp, NULL, ino,
			 (XFS_IGET_DONTCACHE | XFS_IGET_UNTRUSTED),
			 XFS_ILOCK_SHARED, &ip);
	if (error)
		goto out_free;

	ASSERT(ip != NULL);
	ASSERT(ip->i_imap.im_blkno != 0);
	inode = VFS_I(ip);

	dic = &ip->i_d;

	/*
	 * xfs_iget returns the following without needing
	 * further change.
	 */
	buf->bs_projid_lo = dic->di_projid_lo;
	buf->bs_projid_hi = dic->di_projid_hi;
	buf->bs_ino = ino;
	buf->bs_uid = dic->di_uid;
	buf->bs_gid = dic->di_gid;
	buf->bs_size = dic->di_size;

	buf->bs_nlink = inode->i_nlink;
	buf->bs_atime.tv_sec = inode->i_atime.tv_sec;
	buf->bs_atime.tv_nsec = inode->i_atime.tv_nsec;
	buf->bs_mtime.tv_sec = inode->i_mtime.tv_sec;
	buf->bs_mtime.tv_nsec = inode->i_mtime.tv_nsec;
	buf->bs_ctime.tv_sec = inode->i_ctime.tv_sec;
	buf->bs_ctime.tv_nsec = inode->i_ctime.tv_nsec;
	buf->bs_gen = inode->i_generation;
	buf->bs_mode = inode->i_mode;

	buf->bs_xflags = xfs_ip2xflags(ip);
	buf->bs_extsize = dic->di_extsize << mp->m_sb.sb_blocklog;
	buf->bs_extents = dic->di_nextents;
	memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
	buf->bs_dmevmask = dic->di_dmevmask;
	buf->bs_dmstate = dic->di_dmstate;
	buf->bs_aextents = dic->di_anextents;
	buf->bs_forkoff = XFS_IFORK_BOFF(ip);

	if (dic->di_version == 3) {
		if (dic->di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
			buf->bs_cowextsize = dic->di_cowextsize <<
					mp->m_sb.sb_blocklog;
	}

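	/*
	 * The block size and block count reported to userspace depend on
	 * the on-disk format of the data fork.
	 */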
	switch (dic->di_format) {
	case XFS_DINODE_FMT_DEV:
		buf->bs_rdev = sysv_encode_dev(inode->i_rdev);
		buf->bs_blksize = BLKDEV_IOSIZE;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_LOCAL:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks;
		break;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	xfs_irele(ip);

	error = formatter(buffer, ubsize, ubused, buf);
	if (!error)
		*stat = BULKSTAT_RV_DIDONE;

 out_free:
	kmem_free(buf);
	return error;
}

/* Return 0 on success or negative error. */
STATIC int
xfs_bulkstat_one_fmt(
	void			__user *ubuffer,
	int			ubsize,
	int			*ubused,
	const xfs_bstat_t	*buffer)
{
	if (ubsize < sizeof(*buffer))
		return -ENOMEM;
	if (copy_to_user(ubuffer, buffer, sizeof(*buffer)))
		return -EFAULT;
	if (ubused)
		*ubused = sizeof(*buffer);
	return 0;
}

int
xfs_bulkstat_one(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* buffer to place output in */
	int		ubsize,		/* size of buffer */
	int		*ubused,	/* bytes used by me */
	int		*stat)		/* BULKSTAT_RV_... */
{
	return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
				    xfs_bulkstat_one_fmt, ubused, stat);
}

/*
 * Loop over all clusters in a chunk for a given incore inode allocation btree
 * record.  Do a readahead if there are any allocated inodes in that cluster.
 */
STATIC void
xfs_bulkstat_ichunk_ra(
	struct xfs_mount		*mp,
	xfs_agnumber_t			agno,
	struct xfs_inobt_rec_incore	*irec)
{
	xfs_agblock_t			agbno;
	struct blk_plug			plug;
	int				blks_per_cluster;
	int				inodes_per_cluster;
	int				i;	/* inode chunk index */

	agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino);
	blks_per_cluster = xfs_icluster_size_fsb(mp);
	inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;

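	/*
	 * Plug the block queue so the readahead submissions below can be
	 * merged into larger I/Os when the plug is released.
	 */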
	blk_start_plug(&plug);
	for (i = 0; i < XFS_INODES_PER_CHUNK;
	     i += inodes_per_cluster, agbno += blks_per_cluster) {
		if (xfs_inobt_maskn(i, inodes_per_cluster) & ~irec->ir_free) {
			xfs_btree_reada_bufs(mp, agno, agbno, blks_per_cluster,
					     &xfs_inode_buf_ops);
		}
	}
	blk_finish_plug(&plug);
}

/*
 * Look up the inode chunk that the given inode lives in and then get the
 * record if we found the chunk.  If the inode is not the last in the chunk
 * and some inodes after it are still allocated, update the pointed-to record
 * and return the count of grabbed inodes.
 */
STATIC int
xfs_bulkstat_grab_ichunk(
	struct xfs_btree_cur		*cur,	/* btree cursor */
	xfs_agino_t			agino,	/* starting inode of chunk */
	int				*icount,/* return # of inodes grabbed */
	struct xfs_inobt_rec_incore	*irec)	/* btree record */
{
	int				idx;	/* index into inode chunk */
	int				stat;
	int				error = 0;

	/* Look up the inode chunk that this inode lives in */
	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &stat);
	if (error)
		return error;
	if (!stat) {
		*icount = 0;
		return error;
	}

	/* Get the record, should always work */
	error = xfs_inobt_get_rec(cur, irec, &stat);
	if (error)
		return error;
	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, stat == 1);

	/* Check if the record contains the requested inode */
	if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino) {
		*icount = 0;
		return 0;
	}

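	/* Index of the first inode after the requested one in this chunk. */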
	idx = agino - irec->ir_startino + 1;
	if (idx < XFS_INODES_PER_CHUNK &&
	    (xfs_inobt_maskn(idx, XFS_INODES_PER_CHUNK - idx) & ~irec->ir_free)) {
		int	i;

		/*
		 * We got the right chunk, and some inodes after our starting
		 * point are still allocated in it.  Grab the chunk record and
		 * mark all the uninteresting inodes free because they come
		 * before our start point.
		 */
		for (i = 0; i < idx; i++) {
			if (XFS_INOBT_MASK(i) & ~irec->ir_free)
				irec->ir_freecount++;
		}

		irec->ir_free |= xfs_inobt_maskn(0, idx);
		*icount = irec->ir_count - irec->ir_freecount;
	}

	return 0;
}

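/*
 * True if the user buffer still has room for at least one more stat
 * structure of statstruct_size bytes.
 */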
#define XFS_BULKSTAT_UBLEFT(ubleft)	((ubleft) >= statstruct_size)

struct xfs_bulkstat_agichunk {
	char		__user **ac_ubuffer;/* pointer into user's buffer */
	int		ac_ubleft;	/* bytes left in user's buffer */
	int		ac_ubelem;	/* spaces used in user's buffer */
};

/*
 * Process inodes in chunk with a pointer to a formatter function
 * that will iget the inode and fill in the appropriate structure.
 */
static int
xfs_bulkstat_ag_ichunk(
	struct xfs_mount		*mp,
	xfs_agnumber_t			agno,
	struct xfs_inobt_rec_incore	*irbp,
	bulkstat_one_pf			formatter,
	size_t				statstruct_size,
	struct xfs_bulkstat_agichunk	*acp,
	xfs_agino_t			*last_agino)
{
	char				__user **ubufp = acp->ac_ubuffer;
	int				chunkidx;
	int				error = 0;
	xfs_agino_t			agino = irbp->ir_startino;

	for (chunkidx = 0; chunkidx < XFS_INODES_PER_CHUNK;
	     chunkidx++, agino++) {
		int		fmterror;
		int		ubused;

		/* inode won't fit in buffer, we are done */
		if (acp->ac_ubleft < statstruct_size)
			break;

		/* Skip if this inode is free */
		if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free)
			continue;

		/* Get the inode and fill in a single buffer */
		ubused = statstruct_size;
		error = formatter(mp, XFS_AGINO_TO_INO(mp, agno, agino),
				  *ubufp, acp->ac_ubleft, &ubused, &fmterror);

		if (fmterror == BULKSTAT_RV_GIVEUP ||
		    (error && error != -ENOENT && error != -EINVAL)) {
			acp->ac_ubleft = 0;
			ASSERT(error);
			break;
		}

		/* be careful not to leak error if at end of chunk */
		if (fmterror == BULKSTAT_RV_NOTHING || error) {
			error = 0;
			continue;
		}

		*ubufp += ubused;
		acp->ac_ubleft -= ubused;
		acp->ac_ubelem++;
	}

	/*
	 * Post-update *last_agino. At this point, agino will always point one
	 * inode past the last inode we processed successfully. Hence we
	 * subtract that inode when setting the *last_agino cursor so that we
	 * return the correct cookie to userspace. On the next bulkstat call,
	 * the inode under the lastino cookie will be skipped as we have already
	 * processed it here.
	 */
	*last_agino = agino - 1;

	return error;
}

/*
 * Return stat information in bulk (by-inode) for the filesystem.
 */
int					/* error status */
xfs_bulkstat(
	xfs_mount_t		*mp,	/* mount point for filesystem */
	xfs_ino_t		*lastinop, /* last inode returned */
	int			*ubcountp, /* size of buffer/count returned */
	bulkstat_one_pf		formatter, /* func that'd fill a single buf */
	size_t			statstruct_size, /* sizeof struct filling */
	char			__user *ubuffer, /* buffer with inode stats */
	int			*done)	/* 1 if there are more stats to get */
{
	xfs_buf_t		*agbp;	/* agi header buffer */
	xfs_agino_t		agino;	/* inode # in allocation group */
	xfs_agnumber_t		agno;	/* allocation group number */
	xfs_btree_cur_t		*cur;	/* btree cursor for ialloc btree */
	xfs_inobt_rec_incore_t	*irbuf;	/* start of irec buffer */
	int			nirbuf;	/* size of irbuf */
	int			ubcount; /* size of user's buffer */
	struct xfs_bulkstat_agichunk ac;
	int			error = 0;

	/*
	 * Get the last inode value, see if there's nothing to do.
	 */
	agno = XFS_INO_TO_AGNO(mp, *lastinop);
	agino = XFS_INO_TO_AGINO(mp, *lastinop);
	if (agno >= mp->m_sb.sb_agcount ||
	    *lastinop != XFS_AGINO_TO_INO(mp, agno, agino)) {
		*done = 1;
		*ubcountp = 0;
		return 0;
	}

	ubcount = *ubcountp; /* statstruct's */
	ac.ac_ubuffer = &ubuffer;
	ac.ac_ubleft = ubcount * statstruct_size; /* bytes */
	ac.ac_ubelem = 0;

	*ubcountp = 0;
	*done = 0;

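	/*
	 * Gather up to four pages worth of incore inobt records per pass;
	 * they are formatted into the user buffer only after the AGI buffer
	 * and btree cursor have been dropped (see del_cursor below).
	 */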
	irbuf = kmem_zalloc_large(PAGE_SIZE * 4, KM_SLEEP);
	if (!irbuf)
		return -ENOMEM;
	nirbuf = (PAGE_SIZE * 4) / sizeof(*irbuf);

	/*
	 * Loop over the allocation groups, starting from the last
	 * inode returned; 0 means start of the allocation group.
	 */
	while (agno < mp->m_sb.sb_agcount) {
		struct xfs_inobt_rec_incore	*irbp = irbuf;
		struct xfs_inobt_rec_incore	*irbufend = irbuf + nirbuf;
		bool				end_of_ag = false;
		int				icount = 0;
		int				stat;

		error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
		if (error)
			break;
		/*
		 * Allocate and initialize a btree cursor for ialloc btree.
		 */
		cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno,
					    XFS_BTNUM_INO);
		if (agino > 0) {
			/*
			 * In the middle of an allocation group, we need to get
			 * the remainder of the chunk we're in.
			 */
			struct xfs_inobt_rec_incore	r;

			error = xfs_bulkstat_grab_ichunk(cur, agino, &icount, &r);
			if (error)
				goto del_cursor;
			if (icount) {
				irbp->ir_startino = r.ir_startino;
				irbp->ir_holemask = r.ir_holemask;
				irbp->ir_count = r.ir_count;
				irbp->ir_freecount = r.ir_freecount;
				irbp->ir_free = r.ir_free;
				irbp++;
			}
			/* Increment to the next record */
			error = xfs_btree_increment(cur, 0, &stat);
		} else {
			/* Start of ag.  Look up the first inode chunk */
			error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &stat);
		}
		if (error || stat == 0) {
			end_of_ag = true;
			goto del_cursor;
		}

		/*
		 * Loop through inode btree records in this ag,
		 * until we run out of inodes or space in the buffer.
		 */
		while (irbp < irbufend && icount < ubcount) {
			struct xfs_inobt_rec_incore	r;

			error = xfs_inobt_get_rec(cur, &r, &stat);
			if (error || stat == 0) {
				end_of_ag = true;
				goto del_cursor;
			}

			/*
			 * If this chunk has any allocated inodes, save it.
			 * Also start read-ahead now for this chunk.
			 */
			if (r.ir_freecount < r.ir_count) {
				xfs_bulkstat_ichunk_ra(mp, agno, &r);
				irbp->ir_startino = r.ir_startino;
				irbp->ir_holemask = r.ir_holemask;
				irbp->ir_count = r.ir_count;
				irbp->ir_freecount = r.ir_freecount;
				irbp->ir_free = r.ir_free;
				irbp++;
				icount += r.ir_count - r.ir_freecount;
			}
			error = xfs_btree_increment(cur, 0, &stat);
			if (error || stat == 0) {
				end_of_ag = true;
				goto del_cursor;
			}
			cond_resched();
		}

		/*
		 * Drop the btree buffers and the agi buffer as we can't hold any
		 * of the locks these represent when calling iget. If there is a
		 * pending error, then we are done.
		 */
del_cursor:
		xfs_btree_del_cursor(cur, error);
		xfs_buf_relse(agbp);
		if (error)
			break;
		/*
		 * Now format all the good inodes into the user's buffer. The
		 * call to xfs_bulkstat_ag_ichunk() sets up the agino pointer
		 * for the next loop iteration.
		 */
		irbufend = irbp;
		for (irbp = irbuf;
		     irbp < irbufend && ac.ac_ubleft >= statstruct_size;
		     irbp++) {
			error = xfs_bulkstat_ag_ichunk(mp, agno, irbp,
					formatter, statstruct_size, &ac,
					&agino);
			if (error)
				break;

			cond_resched();
		}

		/*
		 * If we've run out of space or had a formatting error, we
		 * are now done.
		 */
		if (ac.ac_ubleft < statstruct_size || error)
			break;

		if (end_of_ag) {
			agno++;
			agino = 0;
		}
	}
	/*
	 * Done, we're either out of filesystem or space to put the data.
	 */
	kmem_free(irbuf);
	*ubcountp = ac.ac_ubelem;

	/*
	 * We found some inodes, so clear the error status and return them.
	 * The lastino pointer will point directly at the inode that triggered
	 * any error that occurred, so on the next call the error will be
	 * triggered again and propagated to userspace as there will be no
	 * formatted inodes in the buffer.
	 */
	if (ac.ac_ubelem)
		error = 0;

	/*
	 * If we ran out of filesystem, lastino will point off the end of
	 * the filesystem so the next call will return immediately.
	 */
	*lastinop = XFS_AGINO_TO_INO(mp, agno, agino);
	if (agno >= mp->m_sb.sb_agcount)
		*done = 1;

	return error;
}

int
xfs_inumbers_fmt(
	void			__user *ubuffer, /* buffer to write to */
	const struct xfs_inogrp	*buffer,	/* buffer to read from */
	long			count,		/* # of elements to read */
	long			*written)	/* # of bytes written */
{
	if (copy_to_user(ubuffer, buffer, count * sizeof(*buffer)))
		return -EFAULT;
	*written = count * sizeof(*buffer);
	return 0;
}

/*
 * Return inode number table for the filesystem.
 */
int					/* error status */
xfs_inumbers(
	struct xfs_mount	*mp,/* mount point for filesystem */
	xfs_ino_t		*lastino,/* last inode returned */
	int			*count,/* size of buffer/count returned */
	void			__user *ubuffer,/* buffer with inode descriptions */
	inumbers_fmt_pf		formatter)
{
	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, *lastino);
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, *lastino);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_buf		*agbp = NULL;
	struct xfs_inogrp	*buffer;
	int			bcount;
	int			left = *count;
	int			bufidx = 0;
	int			error = 0;

	*count = 0;
	if (agno >= mp->m_sb.sb_agcount ||
	    *lastino != XFS_AGINO_TO_INO(mp, agno, agino))
		return error;

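	/*
	 * Batch up to one page worth of xfs_inogrp records before copying
	 * them out to userspace in a single formatter call.
	 */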
	bcount = min(left, (int)(PAGE_SIZE / sizeof(*buffer)));
	buffer = kmem_zalloc(bcount * sizeof(*buffer), KM_SLEEP);
	do {
		struct xfs_inobt_rec_incore	r;
		int				stat;

		if (!agbp) {
			error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
			if (error)
				break;

			cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno,
						    XFS_BTNUM_INO);
			error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_GE,
						 &stat);
			if (error)
				break;
			if (!stat)
				goto next_ag;
		}

		error = xfs_inobt_get_rec(cur, &r, &stat);
		if (error)
			break;
		if (!stat)
			goto next_ag;

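		/*
		 * Advance the cookie to the last inode of this chunk so that
		 * a subsequent call resumes with the next chunk.
		 */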
		agino = r.ir_startino + XFS_INODES_PER_CHUNK - 1;
		buffer[bufidx].xi_startino =
			XFS_AGINO_TO_INO(mp, agno, r.ir_startino);
		buffer[bufidx].xi_alloccount = r.ir_count - r.ir_freecount;
		buffer[bufidx].xi_allocmask = ~r.ir_free;
		if (++bufidx == bcount) {
			long	written;

			error = formatter(ubuffer, buffer, bufidx, &written);
			if (error)
				break;
			ubuffer += written;
			*count += bufidx;
			bufidx = 0;
		}
		if (!--left)
			break;

		error = xfs_btree_increment(cur, 0, &stat);
		if (error)
			break;
		if (stat)
			continue;

next_ag:
		xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
		cur = NULL;
		xfs_buf_relse(agbp);
		agbp = NULL;
		agino = 0;
		agno++;
	} while (agno < mp->m_sb.sb_agcount);

	if (!error) {
		if (bufidx) {
			long	written;

			error = formatter(ubuffer, buffer, bufidx, &written);
			if (!error)
				*count += bufidx;
		}
		*lastino = XFS_AGINO_TO_INO(mp, agno, agino);
	}

	kmem_free(buffer);
	if (cur)
		xfs_btree_del_cursor(cur, error);
	if (agbp)
		xfs_buf_relse(agbp);

	return error;
}