/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/delay.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

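/*
 * gfs2_aspace_writepage - Write a page of metadata buffers to disk
 * @page: The page of metadata buffers to write
 * @wbc: The writeback control
 *
 * Walks the buffers on the page, locking each dirty buffer (or skipping
 * it and redirtying the page when a non-blocking writeback cannot take
 * the lock), marks the dirty buffers for async write, and submits them
 * with REQ_META | REQ_PRIO.
 */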
static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
{
	struct buffer_head *bh, *head;
	int nr_underway = 0;
	int write_flags = REQ_META | REQ_PRIO | wbc_to_write_flags(wbc);

	BUG_ON(!PageLocked(page));
	BUG_ON(!page_has_buffers(page));

	head = page_buffers(page);
	bh = head;

	do {
		if (!buffer_mapped(bh))
			continue;
		/*
		 * If it's a fully non-blocking write attempt and we cannot
		 * lock the buffer then redirty the page.  Note that this can
		 * potentially cause a busy-wait loop from flusher thread and kswapd
		 * activity, but those code paths have their own higher-level
		 * throttling.
		 */
		if (wbc->sync_mode != WB_SYNC_NONE) {
			lock_buffer(bh);
		} else if (!trylock_buffer(bh)) {
			redirty_page_for_writepage(wbc, page);
			continue;
		}
		if (test_clear_buffer_dirty(bh)) {
			mark_buffer_async_write(bh);
		} else {
			unlock_buffer(bh);
		}
	} while ((bh = bh->b_this_page) != head);

	/*
	 * The page and its buffers are protected by PageWriteback(), so we can
	 * drop the bh refcounts early.
	 */
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh(REQ_OP_WRITE, write_flags, bh);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);

	if (nr_underway == 0)
		end_page_writeback(page);

	return 0;
}

const struct address_space_operations gfs2_meta_aops = {
	.writepage = gfs2_aspace_writepage,
	.releasepage = gfs2_releasepage,
};

const struct address_space_operations gfs2_rgrp_aops = {
	.writepage = gfs2_aspace_writepage,
	.releasepage = gfs2_releasepage,
};

/**
 * gfs2_getbuf - Get a buffer with a given address space
 * @gl: the glock
 * @blkno: the block number (filesystem scope)
 * @create: 1 if the buffer should be created
 *
 * Returns: the buffer
 */

struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
{
	struct address_space *mapping = gfs2_glock2aspace(gl);
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct page *page;
	struct buffer_head *bh;
	unsigned int shift;
	unsigned long index;
	unsigned int bufnum;

	if (mapping == NULL)
		mapping = &sdp->sd_aspace;

	shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
	index = blkno >> shift;             /* convert block to page */
	bufnum = blkno - (index << shift);  /* block buf index within page */

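	/*
	 * When creating, keep retrying until the page is in the cache;
	 * grab_cache_page() can fail transiently under memory pressure.
	 */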
	if (create) {
		for (;;) {
			page = grab_cache_page(mapping, index);
			if (page)
				break;
			yield();
		}
	} else {
		page = find_get_page_flags(mapping, index,
						FGP_LOCK|FGP_ACCESSED);
		if (!page)
			return NULL;
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);

	/* Locate header for our buffer within our page */
	for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
		/* Do nothing */;
	get_bh(bh);

	if (!buffer_mapped(bh))
		map_bh(bh, sdp->sd_vfs, blkno);

	unlock_page(page);
	put_page(page);

	return bh;
}

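/*
 * meta_prep_new - Initialise a freshly allocated metadata buffer
 * @bh: The buffer to prepare
 *
 * Marks the buffer clean and uptodate and stamps the GFS2 magic number
 * into the metadata header at the start of the buffer.
 */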
static void meta_prep_new(struct buffer_head *bh)
{
	struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;

	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
}

/**
 * gfs2_meta_new - Get a block
 * @gl: The glock associated with this block
 * @blkno: The block number
 *
 * Returns: The buffer
 */

struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
{
	struct buffer_head *bh;
	bh = gfs2_getbuf(gl, blkno, CREATE);
	meta_prep_new(bh);
	return bh;
}

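/*
 * gfs2_meta_read_endio - Completion handler for metadata read bios
 * @bio: The completed bio
 *
 * For each segment of the bio, walk the buffer heads it covers and call
 * their b_end_io handlers with the bio's I/O status.
 */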
static void gfs2_meta_read_endio(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
		struct buffer_head *bh = page_buffers(page);
		unsigned int len = bvec->bv_len;

		while (bh_offset(bh) < bvec->bv_offset)
			bh = bh->b_this_page;
		do {
			struct buffer_head *next = bh->b_this_page;
			len -= bh->b_size;
			bh->b_end_io(bh, !bio->bi_status);
			bh = next;
		} while (bh && len);
	}
	bio_put(bio);
}

/*
 * Submit several consecutive buffer head I/O requests as a single bio I/O
 * request.  (See submit_bh_wbc.)
 */
static void gfs2_submit_bhs(int op, int op_flags, struct buffer_head *bhs[],
			    int num)
{
	while (num > 0) {
		struct buffer_head *bh = *bhs;
		struct bio *bio;

		bio = bio_alloc(GFP_NOIO, num);
		bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
		bio_set_dev(bio, bh->b_bdev);
		while (num > 0) {
			bh = *bhs;
			if (!bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh))) {
				BUG_ON(bio->bi_iter.bi_size == 0);
				break;
			}
			bhs++;
			num--;
		}
		bio->bi_end_io = gfs2_meta_read_endio;
		bio_set_op_attrs(bio, op, op_flags);
		submit_bio(bio);
	}
}

/**
 * gfs2_meta_read - Read a block from disk
 * @gl: The glock covering the block
 * @blkno: The block number
 * @flags: flags
 * @rahead: Do read-ahead
 * @bhp: the place where the buffer is returned (NULL on failure)
 *
 * Returns: errno
 */

int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
		   int rahead, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct buffer_head *bh, *bhs[2];
	int num = 0;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		*bhp = NULL;
		return -EIO;
	}

	*bhp = bh = gfs2_getbuf(gl, blkno, CREATE);

	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		flags &= ~DIO_WAIT;
	} else {
		bh->b_end_io = end_buffer_read_sync;
		get_bh(bh);
		bhs[num++] = bh;
	}

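	/* If read-ahead was requested, also start a read of the next block */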
	if (rahead) {
		bh = gfs2_getbuf(gl, blkno + 1, CREATE);

		lock_buffer(bh);
		if (buffer_uptodate(bh)) {
			unlock_buffer(bh);
			brelse(bh);
		} else {
			bh->b_end_io = end_buffer_read_sync;
			bhs[num++] = bh;
		}
	}

	gfs2_submit_bhs(REQ_OP_READ, REQ_META | REQ_PRIO, bhs, num);
	if (!(flags & DIO_WAIT))
		return 0;

	bh = *bhp;
	wait_on_buffer(bh);
	if (unlikely(!buffer_uptodate(bh))) {
		struct gfs2_trans *tr = current->journal_info;
		if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
			gfs2_io_error_bh_wd(sdp, bh);
		brelse(bh);
		*bhp = NULL;
		return -EIO;
	}

	return 0;
}

/**
 * gfs2_meta_wait - Wait for a previously submitted block read to complete
 * @sdp: the filesystem
 * @bh: The block to wait for
 *
 * Returns: errno
 */

int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	wait_on_buffer(bh);

	if (!buffer_uptodate(bh)) {
		struct gfs2_trans *tr = current->journal_info;
		if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
			gfs2_io_error_bh_wd(sdp, bh);
		return -EIO;
	}
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	return 0;
}

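/*
 * gfs2_remove_from_journal - Take a buffer off the journal's lists
 * @bh: The buffer
 * @meta: REMOVE_META if the buffer holds metadata, otherwise journaled data
 *
 * Unpins the buffer if it was pinned and accounts the removal in the
 * current transaction.  If the buffer's bufdata is still attached to a
 * transaction, a revoke is queued for it; otherwise, if the buffer had
 * been pinned, the bufdata is freed.  Finally the buffer's dirty and
 * uptodate flags are cleared.
 */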
void gfs2_remove_from_journal(struct buffer_head *bh, int meta)
{
	struct address_space *mapping = bh->b_page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct gfs2_bufdata *bd = bh->b_private;
	struct gfs2_trans *tr = current->journal_info;
	int was_pinned = 0;

	if (test_clear_buffer_pinned(bh)) {
		trace_gfs2_pin(bd, 0);
		atomic_dec(&sdp->sd_log_pinned);
		list_del_init(&bd->bd_list);
		if (meta == REMOVE_META)
			tr->tr_num_buf_rm++;
		else
			tr->tr_num_databuf_rm++;
		set_bit(TR_TOUCHED, &tr->tr_flags);
		was_pinned = 1;
		brelse(bh);
	}
	if (bd) {
		spin_lock(&sdp->sd_ail_lock);
		if (bd->bd_tr) {
			gfs2_trans_add_revoke(sdp, bd);
		} else if (was_pinned) {
			bh->b_private = NULL;
			kmem_cache_free(gfs2_bufdata_cachep, bd);
		}
		spin_unlock(&sdp->sd_ail_lock);
	}
	clear_buffer_dirty(bh);
	clear_buffer_uptodate(bh);
}

/**
 * gfs2_meta_wipe - make sure an inode's buffers are no longer dirty or pinned
 * @ip: the inode that owns the buffers
 * @bstart: the first buffer in the run
 * @blen: the number of buffers in the run
 *
 */

void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *bh;

	while (blen) {
		bh = gfs2_getbuf(ip->i_gl, bstart, NO_CREATE);
		if (bh) {
			lock_buffer(bh);
			gfs2_log_lock(sdp);
			gfs2_remove_from_journal(bh, REMOVE_META);
			gfs2_log_unlock(sdp);
			unlock_buffer(bh);
			brelse(bh);
		}

		bstart++;
		blen--;
	}
}

/**
 * gfs2_meta_indirect_buffer - Get a metadata buffer
 * @ip: The GFS2 inode
 * @height: The level of this buf in the metadata (indir addr) tree (if any)
 * @num: The block number (device relative) of the buffer
 * @bhp: the buffer is returned here
 *
 * Returns: errno
 */

int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
			      struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_glock *gl = ip->i_gl;
	struct buffer_head *bh;
	int ret = 0;
	u32 mtype = height ? GFS2_METATYPE_IN : GFS2_METATYPE_DI;
	int rahead = 0;

	if (num == ip->i_no_addr)
		rahead = ip->i_rahead;

	ret = gfs2_meta_read(gl, num, DIO_WAIT, rahead, &bh);
	if (ret == 0 && gfs2_metatype_check(sdp, bh, mtype)) {
		brelse(bh);
		ret = -EIO;
	} else {
		*bhp = bh;
	}
	return ret;
}

/**
 * gfs2_meta_ra - start readahead on an extent of a file
 * @gl: the glock the blocks belong to
 * @dblock: the starting disk block
 * @extlen: the number of blocks in the extent
 *
 * Returns: the first buffer in the extent
 */

struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct buffer_head *first_bh, *bh;
	u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
			  sdp->sd_sb.sb_bsize_shift;

	BUG_ON(!extlen);

	if (max_ra < 1)
		max_ra = 1;
	if (extlen > max_ra)
		extlen = max_ra;

	first_bh = gfs2_getbuf(gl, dblock, CREATE);

	if (buffer_uptodate(first_bh))
		goto out;
	if (!buffer_locked(first_bh))
		ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &first_bh);

	dblock++;
	extlen--;

	while (extlen) {
		bh = gfs2_getbuf(gl, dblock, CREATE);

		if (!buffer_uptodate(bh) && !buffer_locked(bh))
			ll_rw_block(REQ_OP_READ,
				    REQ_RAHEAD | REQ_META | REQ_PRIO,
				    1, &bh);
		brelse(bh);
		dblock++;
		extlen--;
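		/* Stop issuing readahead once the first block has been read */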
		if (!buffer_locked(first_bh) && buffer_uptodate(first_bh))
			goto out;
	}

	wait_on_buffer(first_bh);
out:
	return first_bh;
}