// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include <linux/capability.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_defer.h"

STATIC int	xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
STATIC int	xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
					uint);

/*
 * Turn off quota accounting and/or enforcement for all udquots and/or
 * gdquots. Called only at unmount time.
 *
 * This assumes that there are no dquots of this file system cached
 * incore, and modifies the ondisk dquot directly. Therefore, for example,
 * it is an error to call this twice, without purging the cache.
 */
int
xfs_qm_scall_quotaoff(
	xfs_mount_t		*mp,
	uint			flags)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	uint			dqtype;
	int			error;
	uint			inactivate_flags;
	xfs_qoff_logitem_t	*qoffstart;

	/*
	 * No file system can have quotas enabled on disk but not in core.
	 * Note that quota utilities (like quotaoff) _expect_
	 * errno == -EEXIST here.
	 */
	if ((mp->m_qflags & flags) == 0)
		return -EEXIST;
	error = 0;

	flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);

	/*
	 * We don't want to deal with two quotaoffs messing up each other,
	 * so we're going to serialize it. quotaoff isn't exactly a performance
	 * critical thing.
	 * If quotaoff, then we must be dealing with the root filesystem.
	 */
	ASSERT(q);
	mutex_lock(&q->qi_quotaofflock);

	/*
	 * If we're just turning off quota enforcement, change mp and go.
	 */
	if ((flags & XFS_ALL_QUOTA_ACCT) == 0) {
		mp->m_qflags &= ~(flags);

		spin_lock(&mp->m_sb_lock);
		mp->m_sb.sb_qflags = mp->m_qflags;
		spin_unlock(&mp->m_sb_lock);
		mutex_unlock(&q->qi_quotaofflock);

		/* XXX what to do if error ? Revert back to old vals incore ? */
		return xfs_sync_sb(mp, false);
	}

	dqtype = 0;
	inactivate_flags = 0;
	/*
	 * If accounting is off, we must turn enforcement off, clear the
	 * quota 'CHKD' certificate to make it known that we have to
	 * do a quotacheck the next time this quota is turned on.
	 */
	if (flags & XFS_UQUOTA_ACCT) {
		dqtype |= XFS_QMOPT_UQUOTA;
		flags |= (XFS_UQUOTA_CHKD | XFS_UQUOTA_ENFD);
		inactivate_flags |= XFS_UQUOTA_ACTIVE;
	}
	if (flags & XFS_GQUOTA_ACCT) {
		dqtype |= XFS_QMOPT_GQUOTA;
		flags |= (XFS_GQUOTA_CHKD | XFS_GQUOTA_ENFD);
		inactivate_flags |= XFS_GQUOTA_ACTIVE;
	}
	if (flags & XFS_PQUOTA_ACCT) {
		dqtype |= XFS_QMOPT_PQUOTA;
		flags |= (XFS_PQUOTA_CHKD | XFS_PQUOTA_ENFD);
		inactivate_flags |= XFS_PQUOTA_ACTIVE;
	}

	/*
	 * Nothing to do?  Don't complain. This happens when we're just
	 * turning off quota enforcement.
	 */
	if ((mp->m_qflags & flags) == 0)
		goto out_unlock;

	/*
	 * Write the LI_QUOTAOFF log record, and do SB changes atomically,
	 * and synchronously. If we fail to write, we should abort the
	 * operation as it cannot be recovered safely if we crash.
	 */
	error = xfs_qm_log_quotaoff(mp, &qoffstart, flags);
	if (error)
		goto out_unlock;

	/*
	 * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct
	 * to take care of the race between dqget and quotaoff. We don't take
	 * any special locks to reset these bits. All processes need to check
	 * these bits *after* taking inode lock(s) to see if the particular
	 * quota type is in the process of being turned off. If *ACTIVE, it is
	 * guaranteed that all dquot structures and all quotainode ptrs will all
	 * stay valid as long as that inode is kept locked.
	 *
	 * There is no turning back after this.
	 */
	mp->m_qflags &= ~inactivate_flags;

	/*
	 * Give back all the dquot reference(s) held by inodes.
	 * Here we go through every single incore inode in this file system, and
	 * do a dqrele on the i_udquot/i_gdquot that it may have.
	 * Essentially, as long as somebody has an inode locked, this guarantees
	 * that quotas will not be turned off. This is handy because in a
	 * transaction once we lock the inode(s) and check for quotaon, we can
	 * depend on the quota inodes (and other things) being valid as long as
	 * we keep the lock(s).
	 */
	xfs_qm_dqrele_all_inodes(mp, flags);

	/*
	 * Next we make the changes in the quota flag in the mount struct.
	 * This isn't protected by a particular lock directly, because we
	 * don't want to take a mrlock every time we depend on quotas being on.
	 */
	mp->m_qflags &= ~flags;

	/*
	 * Go through all the dquots of this file system and purge them,
	 * according to what was turned off.
	 */
	xfs_qm_dqpurge_all(mp, dqtype);

	/*
	 * Transactions that had started before ACTIVE state bit was cleared
	 * could have logged many dquots, so they'd have higher LSNs than
	 * the first QUOTAOFF log record does. If we happen to crash when
	 * the tail of the log has gone past the QUOTAOFF record, but
	 * before the last dquot modification, those dquots __will__
	 * recover, and that's not good.
	 *
	 * So, we have QUOTAOFF start and end logitems; the start
	 * logitem won't get overwritten until the end logitem appears...
	 */
	error = xfs_qm_log_quotaoff_end(mp, qoffstart, flags);
	if (error) {
		/* We're screwed now. Shutdown is the only option. */
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		goto out_unlock;
	}

	/*
	 * If all quotas are completely turned off, close shop.
	 */
	if (mp->m_qflags == 0) {
		mutex_unlock(&q->qi_quotaofflock);
		xfs_qm_destroy_quotainfo(mp);
		return 0;
	}

	/*
	 * Release our quotainode references if we don't need them anymore.
	 */
	if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) {
		xfs_irele(q->qi_uquotaip);
		q->qi_uquotaip = NULL;
	}
	if ((dqtype & XFS_QMOPT_GQUOTA) && q->qi_gquotaip) {
		xfs_irele(q->qi_gquotaip);
		q->qi_gquotaip = NULL;
	}
	if ((dqtype & XFS_QMOPT_PQUOTA) && q->qi_pquotaip) {
		xfs_irele(q->qi_pquotaip);
		q->qi_pquotaip = NULL;
	}

out_unlock:
	mutex_unlock(&q->qi_quotaofflock);
	return error;
}
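
/*
 * Truncate one quota inode back to zero length: log a zero file size, remove
 * all of its data fork extents and commit the timestamp update in a single
 * transaction. A NULLFSINO means the quota inode does not exist, so there is
 * nothing to do.
 */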
STATIC int
xfs_qm_scall_trunc_qfile(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;
	struct xfs_trans	*tp;
	int			error;

	if (ino == NULLFSINO)
		return 0;

	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
	if (error)
		return error;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
	if (error) {
		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
		goto out_put;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	ip->i_d.di_size = 0;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
	if (error) {
		xfs_trans_cancel(tp);
		goto out_unlock;
	}

	ASSERT(ip->i_d.di_nextents == 0);

	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	error = xfs_trans_commit(tp);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
out_put:
	xfs_irele(ip);
	return error;
}
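
/*
 * Truncate the on-disk quota files selected by @flags (any combination of
 * XFS_DQ_USER, XFS_DQ_GROUP and XFS_DQ_PROJ) back to zero length.
 */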
int
xfs_qm_scall_trunc_qfiles(
	xfs_mount_t	*mp,
	uint		flags)
{
	int		error = -EINVAL;

	if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0 ||
	    (flags & ~XFS_DQ_ALLTYPES)) {
		xfs_debug(mp, "%s: flags=%x m_qflags=%x",
			__func__, flags, mp->m_qflags);
		return -EINVAL;
	}

	if (flags & XFS_DQ_USER) {
		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino);
		if (error)
			return error;
	}
	if (flags & XFS_DQ_GROUP) {
		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino);
		if (error)
			return error;
	}
	if (flags & XFS_DQ_PROJ)
		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_pquotino);

	return error;
}

/*
 * Switch on (a given) quota enforcement for a filesystem.  This takes
 * effect immediately.
 * (Switching on quota accounting must be done at mount time.)
 */
int
xfs_qm_scall_quotaon(
	xfs_mount_t	*mp,
	uint		flags)
{
	int		error;
	uint		qf;

	flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
	/*
	 * Switching on quota accounting must be done at mount time.
	 */
	flags &= ~(XFS_ALL_QUOTA_ACCT);

	if (flags == 0) {
		xfs_debug(mp, "%s: zero flags, m_qflags=%x",
			__func__, mp->m_qflags);
		return -EINVAL;
	}

	/*
	 * Can't enforce without accounting. We check the superblock
	 * qflags here instead of m_qflags because rootfs can have
	 * quota acct on ondisk without m_qflags' knowing.
	 */
	if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
	     (flags & XFS_UQUOTA_ENFD)) ||
	    ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
	     (flags & XFS_GQUOTA_ENFD)) ||
	    ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
	     (flags & XFS_PQUOTA_ENFD))) {
		xfs_debug(mp,
			"%s: Can't enforce without acct, flags=%x sbflags=%x",
			__func__, flags, mp->m_sb.sb_qflags);
		return -EINVAL;
	}
	/*
	 * If everything's up to date incore, then don't waste time.
	 */
	if ((mp->m_qflags & flags) == flags)
		return -EEXIST;

	/*
	 * Change sb_qflags on disk but not incore mp->qflags
	 * if this is the root filesystem.
	 */
	spin_lock(&mp->m_sb_lock);
	qf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = qf | flags;
	spin_unlock(&mp->m_sb_lock);

	/*
	 * There's nothing to change if it's the same.
	 */
	if ((qf & flags) == flags)
		return -EEXIST;

	error = xfs_sync_sb(mp, false);
	if (error)
		return error;
	/*
	 * If we aren't trying to switch on quota enforcement, we are done.
	 */
	if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) !=
	     (mp->m_qflags & XFS_UQUOTA_ACCT)) ||
	     ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) !=
	     (mp->m_qflags & XFS_PQUOTA_ACCT)) ||
	     ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) !=
	     (mp->m_qflags & XFS_GQUOTA_ACCT)))
		return 0;

	if (!XFS_IS_QUOTA_RUNNING(mp))
		return -ESRCH;

	/*
	 * Switch on quota enforcement in core.
	 */
	mutex_lock(&mp->m_quotainfo->qi_quotaofflock);
	mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
	mutex_unlock(&mp->m_quotainfo->qi_quotaofflock);

	return 0;
}

#define XFS_QC_MASK \
	(QC_LIMIT_MASK | QC_TIMER_MASK | QC_WARNS_MASK)

/*
 * Adjust quota limits, and start/stop timers accordingly.
 */
int
xfs_qm_scall_setqlim(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	struct qc_dqblk		*newlim)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_disk_dquot	*ddq;
	struct xfs_dquot	*dqp;
	struct xfs_trans	*tp;
	struct xfs_def_quota	*defq;
	int			error;
	xfs_qcnt_t		hard, soft;

	if (newlim->d_fieldmask & ~XFS_QC_MASK)
		return -EINVAL;
	if ((newlim->d_fieldmask & XFS_QC_MASK) == 0)
		return 0;

	/*
	 * We don't want to race with a quotaoff so take the quotaoff lock.
	 * We don't hold an inode lock, so there's nothing else to stop
	 * a quotaoff from happening.
	 */
	mutex_lock(&q->qi_quotaofflock);

	/*
	 * Get the dquot (locked) before we start, as we need to do a
	 * transaction to allocate it if it doesn't exist. Once we have the
	 * dquot, unlock it so we can start the next transaction safely. We hold
	 * a reference to the dquot, so it's safe to do this unlock/lock without
	 * it being reclaimed in the meantime.
	 */
	error = xfs_qm_dqget(mp, id, type, true, &dqp);
	if (error) {
		ASSERT(error != -ENOENT);
		goto out_unlock;
	}

	defq = xfs_get_defquota(dqp, q);
	xfs_dqunlock(dqp);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_setqlim, 0, 0, 0, &tp);
	if (error)
		goto out_rele;

	xfs_dqlock(dqp);
	xfs_trans_dqjoin(tp, dqp);
	ddq = &dqp->q_core;

	/*
	 * Make sure that hardlimits are >= soft limits before changing.
	 */
	hard = (newlim->d_fieldmask & QC_SPC_HARD) ?
		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) :
			be64_to_cpu(ddq->d_blk_hardlimit);
	soft = (newlim->d_fieldmask & QC_SPC_SOFT) ?
		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) :
			be64_to_cpu(ddq->d_blk_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_blk_hardlimit = cpu_to_be64(hard);
		ddq->d_blk_softlimit = cpu_to_be64(soft);
		xfs_dquot_set_prealloc_limits(dqp);
		if (id == 0) {
			defq->bhardlimit = hard;
			defq->bsoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft);
	}
	hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ?
		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) :
			be64_to_cpu(ddq->d_rtb_hardlimit);
	soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ?
		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) :
			be64_to_cpu(ddq->d_rtb_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_rtb_hardlimit = cpu_to_be64(hard);
		ddq->d_rtb_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			defq->rtbhardlimit = hard;
			defq->rtbsoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft);
	}

	hard = (newlim->d_fieldmask & QC_INO_HARD) ?
		(xfs_qcnt_t) newlim->d_ino_hardlimit :
			be64_to_cpu(ddq->d_ino_hardlimit);
	soft = (newlim->d_fieldmask & QC_INO_SOFT) ?
		(xfs_qcnt_t) newlim->d_ino_softlimit :
			be64_to_cpu(ddq->d_ino_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_ino_hardlimit = cpu_to_be64(hard);
		ddq->d_ino_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			defq->ihardlimit = hard;
			defq->isoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "ihard %Ld < isoft %Ld", hard, soft);
	}

	/*
	 * Update warnings counter(s) if requested.
	 */
	if (newlim->d_fieldmask & QC_SPC_WARNS)
		ddq->d_bwarns = cpu_to_be16(newlim->d_spc_warns);
	if (newlim->d_fieldmask & QC_INO_WARNS)
		ddq->d_iwarns = cpu_to_be16(newlim->d_ino_warns);
	if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
		ddq->d_rtbwarns = cpu_to_be16(newlim->d_rt_spc_warns);

	if (id == 0) {
		/*
		 * Timelimits for the super user set the relative time
		 * the other users can be over quota for this file system.
		 * If it is zero a default is used.  Ditto for the default
		 * soft and hard limit values (already done, above), and
		 * for warnings.
		 */
		if (newlim->d_fieldmask & QC_SPC_TIMER) {
			q->qi_btimelimit = newlim->d_spc_timer;
			ddq->d_btimer = cpu_to_be32(newlim->d_spc_timer);
		}
		if (newlim->d_fieldmask & QC_INO_TIMER) {
			q->qi_itimelimit = newlim->d_ino_timer;
			ddq->d_itimer = cpu_to_be32(newlim->d_ino_timer);
		}
		if (newlim->d_fieldmask & QC_RT_SPC_TIMER) {
			q->qi_rtbtimelimit = newlim->d_rt_spc_timer;
			ddq->d_rtbtimer = cpu_to_be32(newlim->d_rt_spc_timer);
		}
		if (newlim->d_fieldmask & QC_SPC_WARNS)
			q->qi_bwarnlimit = newlim->d_spc_warns;
		if (newlim->d_fieldmask & QC_INO_WARNS)
			q->qi_iwarnlimit = newlim->d_ino_warns;
		if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
			q->qi_rtbwarnlimit = newlim->d_rt_spc_warns;
	} else {
		/*
		 * If the user is now over quota, start the timelimit.
		 * The user will not be 'warned'.
		 * Note that we keep the timers ticking, whether enforcement
		 * is on or off. We don't really want to bother with iterating
		 * over all ondisk dquots and turning the timers on/off.
		 */
		xfs_qm_adjust_dqtimers(mp, ddq);
	}
	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_trans_log_dquot(tp, dqp);

	error = xfs_trans_commit(tp);

out_rele:
	xfs_qm_dqrele(dqp);
out_unlock:
	mutex_unlock(&q->qi_quotaofflock);
	return error;
}
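
/*
 * Log the quotaoff end record that pairs with @startqoff. Once this record
 * is on disk, log recovery knows it does not need to replay dquot
 * modifications logged before the quotaoff. Committed synchronously.
 */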
STATIC int
xfs_qm_log_quotaoff_end(
	xfs_mount_t		*mp,
	xfs_qoff_logitem_t	*startqoff,
	uint			flags)
{
	xfs_trans_t		*tp;
	int			error;
	xfs_qoff_logitem_t	*qoffi;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_equotaoff, 0, 0, 0, &tp);
	if (error)
		return error;

	qoffi = xfs_trans_get_qoff_item(tp, startqoff,
					flags & XFS_ALL_QUOTA_ACCT);
	xfs_trans_log_quotaoff_item(tp, qoffi);

	/*
	 * We have to make sure that the transaction is secure on disk before we
	 * return and actually stop quota accounting. So, make it synchronous.
	 * We don't care about quotaoff's performance.
	 */
	xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}
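
/*
 * Log the quotaoff start record and the updated superblock quota flags in a
 * single synchronous transaction. On success the start log item is returned
 * through @qoffstartp so the caller can log the matching end record later.
 */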
STATIC int
xfs_qm_log_quotaoff(
	xfs_mount_t	       *mp,
	xfs_qoff_logitem_t     **qoffstartp,
	uint		       flags)
{
	xfs_trans_t	       *tp;
	int			error;
	xfs_qoff_logitem_t     *qoffi;

	*qoffstartp = NULL;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_quotaoff, 0, 0, 0, &tp);
	if (error)
		goto out;

	qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
	xfs_trans_log_quotaoff_item(tp, qoffi);

	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	xfs_log_sb(tp);

	/*
	 * We have to make sure that the transaction is secure on disk before we
	 * return and actually stop quota accounting. So, make it synchronous.
	 * We don't care about quotaoff's performance.
	 */
	xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp);
	if (error)
		goto out;

	*qoffstartp = qoffi;
out:
	return error;
}

/* Fill out the quota context. */
static void
xfs_qm_scall_getquota_fill_qc(
	struct xfs_mount	*mp,
	uint			type,
	const struct xfs_dquot	*dqp,
	struct qc_dqblk		*dst)
{
	memset(dst, 0, sizeof(*dst));
	dst->d_spc_hardlimit =
		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
	dst->d_spc_softlimit =
		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
	dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
	dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
	dst->d_space = XFS_FSB_TO_B(mp, dqp->q_res_bcount);
	dst->d_ino_count = dqp->q_res_icount;
	dst->d_spc_timer = be32_to_cpu(dqp->q_core.d_btimer);
	dst->d_ino_timer = be32_to_cpu(dqp->q_core.d_itimer);
	dst->d_ino_warns = be16_to_cpu(dqp->q_core.d_iwarns);
	dst->d_spc_warns = be16_to_cpu(dqp->q_core.d_bwarns);
	dst->d_rt_spc_hardlimit =
		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
	dst->d_rt_spc_softlimit =
		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
	dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_res_rtbcount);
	dst->d_rt_spc_timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
	dst->d_rt_spc_warns = be16_to_cpu(dqp->q_core.d_rtbwarns);

	/*
	 * Internally, we don't reset all the timers when quota enforcement
	 * gets turned off. No need to confuse the user level code,
	 * so return zeroes in that case.
	 */
	if ((!XFS_IS_UQUOTA_ENFORCED(mp) &&
	     dqp->q_core.d_flags == XFS_DQ_USER) ||
	    (!XFS_IS_GQUOTA_ENFORCED(mp) &&
	     dqp->q_core.d_flags == XFS_DQ_GROUP) ||
	    (!XFS_IS_PQUOTA_ENFORCED(mp) &&
	     dqp->q_core.d_flags == XFS_DQ_PROJ)) {
		dst->d_spc_timer = 0;
		dst->d_ino_timer = 0;
		dst->d_rt_spc_timer = 0;
	}

#ifdef DEBUG
	if (((XFS_IS_UQUOTA_ENFORCED(mp) && type == XFS_DQ_USER) ||
	     (XFS_IS_GQUOTA_ENFORCED(mp) && type == XFS_DQ_GROUP) ||
	     (XFS_IS_PQUOTA_ENFORCED(mp) && type == XFS_DQ_PROJ)) &&
	    dqp->q_core.d_id != 0) {
		if ((dst->d_space > dst->d_spc_softlimit) &&
		    (dst->d_spc_softlimit > 0)) {
			ASSERT(dst->d_spc_timer != 0);
		}
		if ((dst->d_ino_count > dst->d_ino_softlimit) &&
		    (dst->d_ino_softlimit > 0)) {
			ASSERT(dst->d_ino_timer != 0);
		}
	}
#endif
}

/* Return the quota information for the dquot matching id. */
int
xfs_qm_scall_getquota(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	struct qc_dqblk		*dst)
{
	struct xfs_dquot	*dqp;
	int			error;

	/*
	 * Try to get the dquot. We don't want it allocated on disk, so don't
	 * set doalloc. If it doesn't exist, we'll get ENOENT back.
	 */
	error = xfs_qm_dqget(mp, id, type, false, &dqp);
	if (error)
		return error;

	/*
	 * If everything's NULL, this dquot doesn't quite exist as far as
	 * our utility programs are concerned.
	 */
	if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
		error = -ENOENT;
		goto out_put;
	}

	xfs_qm_scall_getquota_fill_qc(mp, type, dqp, dst);

out_put:
	xfs_qm_dqput(dqp);
	return error;
}

/*
 * Return the quota information for the first initialized dquot whose id
 * is at least as high as id.
 */
int
xfs_qm_scall_getquota_next(
	struct xfs_mount	*mp,
	xfs_dqid_t		*id,
	uint			type,
	struct qc_dqblk		*dst)
{
	struct xfs_dquot	*dqp;
	int			error;

	error = xfs_qm_dqget_next(mp, *id, type, &dqp);
	if (error)
		return error;

	/* Fill in the ID we actually read from disk. */
	*id = be32_to_cpu(dqp->q_core.d_id);

	xfs_qm_scall_getquota_fill_qc(mp, type, dqp, dst);

	xfs_qm_dqput(dqp);
	return error;
}
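
/*
 * Release the user/group/project dquot references held by a single inode.
 * Called for every incore inode by xfs_qm_dqrele_all_inodes(); the quota
 * inodes themselves are expected to hold no dquot references and are
 * skipped.
 */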
STATIC int
xfs_dqrele_inode(
	struct xfs_inode	*ip,
	int			flags,
	void			*args)
{
	/* skip quota inodes */
	if (ip == ip->i_mount->m_quotainfo->qi_uquotaip ||
	    ip == ip->i_mount->m_quotainfo->qi_gquotaip ||
	    ip == ip->i_mount->m_quotainfo->qi_pquotaip) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(ip->i_pdquot == NULL);
		return 0;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if ((flags & XFS_GQUOTA_ACCT) && ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	if ((flags & XFS_PQUOTA_ACCT) && ip->i_pdquot) {
		xfs_qm_dqrele(ip->i_pdquot);
		ip->i_pdquot = NULL;
	}
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;
}

/*
 * Go through all the inodes in the file system, releasing their dquots.
 *
 * Note that the mount structure gets modified to indicate that quotas are off
 * AFTER this, in the case of quotaoff.
 */
void
xfs_qm_dqrele_all_inodes(
	struct xfs_mount *mp,
	uint		 flags)
{
	ASSERT(mp->m_quotainfo);
	xfs_inode_ag_iterator_flags(mp, xfs_dqrele_inode, flags, NULL,
				    XFS_AGITER_INEW_WAIT);
}