// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_refcount_item.h"
#include "xfs_alloc.h"
#include "xfs_refcount.h"

/*
 * Allocate a "refcount update done" (CUD) log item that will track the
 * completion of the updates described by the given CUI, and add it to
 * the transaction.
 */
struct xfs_cud_log_item *
xfs_trans_get_cud(
	struct xfs_trans		*tp,
	struct xfs_cui_log_item		*cuip)
{
	struct xfs_cud_log_item		*cudp;

	cudp = xfs_cud_init(tp->t_mountp, cuip);
	xfs_trans_add_item(tp, &cudp->cud_item);
	return cudp;
}
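
/*
 * A rough sketch of how these pieces fit together (illustrative only,
 * not code copied from elsewhere in the tree; the variable names are
 * placeholders): once a CUI has been committed, whoever finishes the
 * work (the deferred-ops machinery, or log recovery replaying the
 * intent) grabs the matching CUD and completes each logged extent:
 *
 *	cudp = xfs_trans_get_cud(tp, cuip);
 *	error = xfs_trans_log_finish_refcount_update(tp, cudp, type,
 *			startblock, blockcount, &new_fsb, &new_len, &rcur);
 *	xfs_refcount_finish_one_cleanup(tp, rcur, error);
 */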

/*
 * Finish a refcount update and log it to the CUD. Note that the
 * transaction is marked dirty regardless of whether the refcount
 * update succeeds or fails, in order to support the CUI/CUD
 * lifecycle rules.
 */
int
xfs_trans_log_finish_refcount_update(
	struct xfs_trans		*tp,
	struct xfs_cud_log_item		*cudp,
	enum xfs_refcount_intent_type	type,
	xfs_fsblock_t			startblock,
	xfs_extlen_t			blockcount,
	xfs_fsblock_t			*new_fsb,
	xfs_extlen_t			*new_len,
	struct xfs_btree_cur		**pcur)
{
	int				error;

	error = xfs_refcount_finish_one(tp, type, startblock,
			blockcount, new_fsb, new_len, pcur);

	/*
	 * Mark the transaction dirty, even on error. This ensures the
	 * transaction is aborted, which:
	 *
	 * 1.) releases the CUI and frees the CUD
	 * 2.) shuts down the filesystem
	 */
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &cudp->cud_item.li_flags);

	return error;
}

/* Sort refcount intents by AG. */
static int
xfs_refcount_update_diff_items(
	void				*priv,
	struct list_head		*a,
	struct list_head		*b)
{
	struct xfs_mount		*mp = priv;
	struct xfs_refcount_intent	*ra;
	struct xfs_refcount_intent	*rb;

	ra = container_of(a, struct xfs_refcount_intent, ri_list);
	rb = container_of(b, struct xfs_refcount_intent, ri_list);
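
	/*
	 * Processing the deferred updates in AG order keeps AG header
	 * and btree buffers locked in a consistent order across work
	 * items, which helps avoid ABBA deadlocks between transactions
	 * touching multiple AGs; the AG number alone is enough of a
	 * sort key for that.
	 */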
	return  XFS_FSB_TO_AGNO(mp, ra->ri_startblock) -
		XFS_FSB_TO_AGNO(mp, rb->ri_startblock);
}

/* Get a CUI. */
STATIC void *
xfs_refcount_update_create_intent(
	struct xfs_trans		*tp,
	unsigned int			count)
{
	struct xfs_cui_log_item		*cuip;

	ASSERT(tp != NULL);
	ASSERT(count > 0);

	cuip = xfs_cui_init(tp->t_mountp, count);
	ASSERT(cuip != NULL);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	xfs_trans_add_item(tp, &cuip->cui_item);
	return cuip;
}

/* Set the phys extent flags for this refcount update. */
static void
xfs_trans_set_refcount_flags(
	struct xfs_phys_extent		*refc,
	enum xfs_refcount_intent_type	type)
{
	refc->pe_flags = 0;
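	/*
	 * The intent type is stored directly in the on-disk pe_flags
	 * field, so the enum values handled below are expected to match
	 * the XFS_REFCOUNT_EXTENT_* codes that log recovery decodes.
	 */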
	switch (type) {
	case XFS_REFCOUNT_INCREASE:
	case XFS_REFCOUNT_DECREASE:
	case XFS_REFCOUNT_ALLOC_COW:
	case XFS_REFCOUNT_FREE_COW:
		refc->pe_flags |= type;
		break;
	default:
		ASSERT(0);
	}
}

/* Log refcount updates in the intent item. */
STATIC void
xfs_refcount_update_log_item(
	struct xfs_trans		*tp,
	void				*intent,
	struct list_head		*item)
{
	struct xfs_cui_log_item		*cuip = intent;
	struct xfs_refcount_intent	*refc;
	uint				next_extent;
	struct xfs_phys_extent		*ext;

	refc = container_of(item, struct xfs_refcount_intent, ri_list);

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &cuip->cui_item.li_flags);

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&cuip->cui_next_extent) - 1;
	ASSERT(next_extent < cuip->cui_format.cui_nextents);
	ext = &cuip->cui_format.cui_extents[next_extent];
	ext->pe_startblock = refc->ri_startblock;
	ext->pe_len = refc->ri_blockcount;
	xfs_trans_set_refcount_flags(ext, refc->ri_type);
}

/* Get a CUD so we can process all the deferred refcount updates. */
STATIC void *
xfs_refcount_update_create_done(
	struct xfs_trans		*tp,
	void				*intent,
	unsigned int			count)
{
	return xfs_trans_get_cud(tp, intent);
}

/* Process a deferred refcount update. */
STATIC int
xfs_refcount_update_finish_item(
	struct xfs_trans		*tp,
	struct list_head		*item,
	void				*done_item,
	void				**state)
{
	struct xfs_refcount_intent	*refc;
	xfs_fsblock_t			new_fsb;
	xfs_extlen_t			new_aglen;
	int				error;

	refc = container_of(item, struct xfs_refcount_intent, ri_list);
	error = xfs_trans_log_finish_refcount_update(tp, done_item,
			refc->ri_type,
			refc->ri_startblock,
			refc->ri_blockcount,
			&new_fsb, &new_aglen,
			(struct xfs_btree_cur **)state);
	/* Did we run out of reservation?  Requeue what we didn't finish. */
	if (!error && new_aglen > 0) {
		ASSERT(refc->ri_type == XFS_REFCOUNT_INCREASE ||
		       refc->ri_type == XFS_REFCOUNT_DECREASE);
		refc->ri_startblock = new_fsb;
		refc->ri_blockcount = new_aglen;
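		/*
		 * Returning -EAGAIN asks the deferred-ops code to log a
		 * fresh intent covering only the remaining blocks and to
		 * roll the transaction before retrying this item.
		 */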
		return -EAGAIN;
	}
	kmem_free(refc);
	return error;
}

/* Clean up after processing deferred refcounts. */
STATIC void
xfs_refcount_update_finish_cleanup(
	struct xfs_trans	*tp,
	void			*state,
	int			error)
{
	struct xfs_btree_cur	*rcur = state;

	xfs_refcount_finish_one_cleanup(tp, rcur, error);
}

/* Abort all pending CUIs. */
STATIC void
xfs_refcount_update_abort_intent(
	void				*intent)
{
	xfs_cui_release(intent);
}

/* Cancel a deferred refcount update. */
STATIC void
xfs_refcount_update_cancel_item(
	struct list_head		*item)
{
	struct xfs_refcount_intent	*refc;

	refc = container_of(item, struct xfs_refcount_intent, ri_list);
	kmem_free(refc);
}

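/*
 * Hooks used by the deferred operations framework to create, log,
 * finish, and cancel refcount update work items.  Roughly speaking,
 * create_intent and log_item run when the intent is committed;
 * create_done, finish_item, and finish_cleanup run when the updates
 * are actually applied; abort_intent and cancel_item handle the
 * failure paths.
 */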
static const struct xfs_defer_op_type xfs_refcount_update_defer_type = {
	.type		= XFS_DEFER_OPS_TYPE_REFCOUNT,
	.max_items	= XFS_CUI_MAX_FAST_EXTENTS,
	.diff_items	= xfs_refcount_update_diff_items,
	.create_intent	= xfs_refcount_update_create_intent,
	.abort_intent	= xfs_refcount_update_abort_intent,
	.log_item	= xfs_refcount_update_log_item,
	.create_done	= xfs_refcount_update_create_done,
	.finish_item	= xfs_refcount_update_finish_item,
	.finish_cleanup = xfs_refcount_update_finish_cleanup,
	.cancel_item	= xfs_refcount_update_cancel_item,
};

/* Register the deferred op type. */
void
xfs_refcount_update_init_defer_op(void)
{
	xfs_defer_init_op_type(&xfs_refcount_update_defer_type);
}