/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/list_sort.h>

#include "dir.h"
#include "gfs2.h"
#include "incore.h"
#include "inode.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function
 */
void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	BUG_ON(!current->journal_info);

	clear_buffer_dirty(bh);
	if (test_set_buffer_pinned(bh))
		gfs2_assert_withdraw(sdp, 0);
	if (!buffer_uptodate(bh))
		gfs2_io_error_bh_wd(sdp, bh);
	bd = bh->b_private;
	/* If this buffer is in the AIL and it has already been written
	 * to the in-place disk block, remove it from the AIL.
	 */
	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr)
		list_move(&bd->bd_ail_st_list, &bd->bd_tr->tr_ail2_list);
	spin_unlock(&sdp->sd_ail_lock);
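	/* Take an extra reference so the buffer cannot be released while
	 * it remains pinned in the journal.
	 */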
	get_bh(bh);
	atomic_inc(&sdp->sd_log_pinned);
	trace_gfs2_pin(bd, 1);
}

static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
{
	return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
}

static void maybe_release_space(struct gfs2_bufdata *bd)
{
	struct gfs2_glock *gl = bd->bd_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
	struct gfs2_bitmap *bi = rgd->rd_bits + index;

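	/* The clone bitmap preserves the pre-free state of the bitmap so
	 * that freed blocks are not reused before this change hits the
	 * log; if no clone exists, there is no space to release.
	 */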
	if (bi->bi_clone == NULL)
		return;
	if (sdp->sd_args.ar_discard)
		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
	memcpy(bi->bi_clone + bi->bi_offset,
	       bd->bd_bh->b_data + bi->bi_offset, bi->bi_len);
	clear_bit(GBF_FULL, &bi->bi_flags);
	rgd->rd_free_clone = rgd->rd_free;
	rgd->rd_extfail_pt = rgd->rd_free;
}

/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @tr: The system transaction being flushed
 *
 */

static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
		       struct gfs2_trans *tr)
{
	struct gfs2_bufdata *bd = bh->b_private;

	BUG_ON(!buffer_uptodate(bh));
	BUG_ON(!buffer_pinned(bh));

	lock_buffer(bh);
	mark_buffer_dirty(bh);
	clear_buffer_pinned(bh);

	if (buffer_is_rgrp(bd))
		maybe_release_space(bd);

	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr) {
		list_del(&bd->bd_ail_st_list);
		brelse(bh);
	} else {
		struct gfs2_glock *gl = bd->bd_gl;
		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
		atomic_inc(&gl->gl_ail_count);
	}
	bd->bd_tr = tr;
	list_add(&bd->bd_ail_st_list, &tr->tr_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	trace_gfs2_pin(bd, 0);
	unlock_buffer(bh);
	atomic_dec(&sdp->sd_log_pinned);
}

static void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
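	/* The flush head must never wrap into the log tail; that would
	 * overwrite journal blocks that have not yet been reclaimed.
	 */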
	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
	       (sdp->sd_log_flush_head != sdp->sd_log_head));

	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks)
		sdp->sd_log_flush_head = 0;
}

u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
{
	unsigned int lbn = sdp->sd_log_flush_head;
	struct gfs2_journal_extent *je;
	u64 block;

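	/* Walk the journal's extent map to translate the logical journal
	 * block number into a physical (device) block number.
	 */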
	list_for_each_entry(je, &sdp->sd_jdesc->extent_list, list) {
		if ((lbn >= je->lblock) && (lbn < (je->lblock + je->blocks))) {
			block = je->dblock + lbn - je->lblock;
			gfs2_log_incr_head(sdp);
			return block;
		}
	}

	return -1;
}

/**
 * gfs2_end_log_write_bh - end log write of pagecache data with buffers
 * @sdp: The superblock
 * @bvec: The bio_vec
 * @error: The i/o status
 *
 * This finds the relevant buffers and unlocks them and sets the
 * error flag according to the status of the i/o request. This is
 * used when the log is writing data which has an in-place version
 * that is pinned in the pagecache.
 */

static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec,
				  blk_status_t error)
{
	struct buffer_head *bh, *next;
	struct page *page = bvec->bv_page;
	unsigned size;

	bh = page_buffers(page);
	size = bvec->bv_len;
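	/* Skip forward to the first buffer covered by this bio_vec; the
	 * page may contain buffers submitted in an earlier bio.
	 */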
	while (bh_offset(bh) < bvec->bv_offset)
		bh = bh->b_this_page;
	do {
		if (error)
			mark_buffer_write_io_error(bh);
		unlock_buffer(bh);
		next = bh->b_this_page;
		size -= bh->b_size;
		brelse(bh);
		bh = next;
	} while (bh && size);
}

/**
 * gfs2_end_log_write - end of i/o to the log
 * @bio: The bio
 * @error: Status of i/o request
 *
 * Each bio_vec contains either data from the pagecache or data
 * relating to the log itself. Here we iterate over the bio_vec
 * array, processing both kinds of data.
 *
 */

static void gfs2_end_log_write(struct bio *bio)
{
	struct gfs2_sbd *sdp = bio->bi_private;
	struct bio_vec *bvec;
	struct page *page;
	int i;

	if (bio->bi_status) {
		fs_err(sdp, "Error %d writing to journal, jid=%u\n",
		       bio->bi_status, sdp->sd_jdesc->jd_jid);
		wake_up(&sdp->sd_logd_waitq);
	}

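	/* Pages with buffers are pinned pagecache data; bufferless pages
	 * are log descriptors allocated from gfs2_page_pool and can be
	 * freed here.
	 */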
	bio_for_each_segment_all(bvec, bio, i) {
		page = bvec->bv_page;
		if (page_has_buffers(page))
			gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
		else
			mempool_free(page, gfs2_page_pool);
	}

	bio_put(bio);
	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
		wake_up(&sdp->sd_log_flush_wait);
}

/**
 * gfs2_log_flush_bio - Submit any pending log bio
 * @sdp: The superblock
 * @op: REQ_OP
 * @op_flags: req_flag_bits
 *
 * Submit any pending part-built or full bio to the block device. If
 * there is no pending bio, then this is a no-op.
 */

void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int op, int op_flags)
{
	if (sdp->sd_log_bio) {
		atomic_inc(&sdp->sd_log_in_flight);
		bio_set_op_attrs(sdp->sd_log_bio, op, op_flags);
		submit_bio(sdp->sd_log_bio);
		sdp->sd_log_bio = NULL;
	}
}

/**
 * gfs2_log_alloc_bio - Allocate a new bio for log writing
 * @sdp: The superblock
 * @blkno: The next device block number we want to write to
 *
 * This should never be called when there is a cached bio in the
 * super block. When it returns, there will be a cached bio in the
 * super block which will have as many bio_vecs as the device is
 * happy to handle.
 *
 * Returns: Newly allocated bio
 */

static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno)
{
	struct super_block *sb = sdp->sd_vfs;
	struct bio *bio;

	BUG_ON(sdp->sd_log_bio);

	bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
	bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9);
	bio_set_dev(bio, sb->s_bdev);
	bio->bi_end_io = gfs2_end_log_write;
	bio->bi_private = sdp;

	sdp->sd_log_bio = bio;

	return bio;
}

/**
 * gfs2_log_get_bio - Get cached log bio, or allocate a new one
 * @sdp: The superblock
 * @blkno: The device block number we want to write to
 *
 * If there is a cached bio, then if the next block number is sequential
 * with the previous one, return it, otherwise flush the bio to the
 * device. If there is not a cached bio, or we just flushed it, then
 * allocate a new one.
 *
 * Returns: The bio to use for log writes
 */

static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno)
{
	struct bio *bio = sdp->sd_log_bio;
	u64 nblk;

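	/* bio_end_sector() counts 512-byte sectors; shift back to
	 * filesystem blocks before testing for contiguity.
	 */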
	if (bio) {
		nblk = bio_end_sector(bio);
		nblk >>= sdp->sd_fsb2bb_shift;
		if (blkno == nblk)
			return bio;
		gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0);
	}

	return gfs2_log_alloc_bio(sdp, blkno);
}

/**
 * gfs2_log_write - write to log
 * @sdp: the filesystem
 * @page: the page to write
 * @size: the size of the data to write
 * @offset: the offset within the page
 * @blkno: block number of the log entry
 *
 * Try and add the page segment to the current bio. If that fails,
 * submit the current bio to the device and create a new one, and
 * then add the page segment to that.
 */

void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
		    unsigned size, unsigned offset, u64 blkno)
{
	struct bio *bio;
	int ret;

	bio = gfs2_log_get_bio(sdp, blkno);
	ret = bio_add_page(bio, page, size, offset);
	if (ret == 0) {
		gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0);
		bio = gfs2_log_alloc_bio(sdp, blkno);
		ret = bio_add_page(bio, page, size, offset);
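		/* Adding a single page segment to a freshly allocated bio
		 * cannot fail.
		 */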
		WARN_ON(ret == 0);
	}
}

/**
 * gfs2_log_write_bh - write a buffer's content to the log
 * @sdp: The super block
 * @bh: The buffer pointing to the in-place location
 *
 * This writes the content of the buffer to the next available location
 * in the log. The buffer will be unlocked once the i/o to the log has
 * completed.
 */

static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh),
		       gfs2_log_bmap(sdp));
}

/**
 * gfs2_log_write_page - write one block stored in a page, into the log
 * @sdp: The superblock
 * @page: The struct page
 *
 * This writes the first block-sized part of the page into the log. Note
 * that the page must have been allocated from the gfs2_page_pool mempool
 * and that after this has been called, ownership has been transferred and
 * the page may be freed at any time.
 */

void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
{
	struct super_block *sb = sdp->sd_vfs;
	gfs2_log_write(sdp, page, sb->s_blocksize, 0,
		       gfs2_log_bmap(sdp));
}

static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
				      u32 ld_length, u32 ld_data1)
{
	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	struct gfs2_log_descriptor *ld = page_address(page);
	clear_page(ld);
	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(ld_type);
	ld->ld_length = cpu_to_be32(ld_length);
	ld->ld_data1 = cpu_to_be32(ld_data1);
	ld->ld_data2 = 0;
	return page;
}

static void gfs2_check_magic(struct buffer_head *bh)
{
	void *kaddr;
	__be32 *ptr;

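	/* Journaled data blocks that happen to begin with the GFS2 magic
	 * number must be escaped, or journal replay would mistake them
	 * for metadata; mark such buffers here.
	 */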
	clear_buffer_escaped(bh);
	kaddr = kmap_atomic(bh->b_page);
	ptr = kaddr + bh_offset(bh);
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		set_buffer_escaped(bh);
	kunmap_atomic(kaddr);
}

static int blocknr_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct gfs2_bufdata *bda, *bdb;

	bda = list_entry(a, struct gfs2_bufdata, bd_list);
	bdb = list_entry(b, struct gfs2_bufdata, bd_list);

	if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr)
		return -1;
	if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr)
		return 1;
	return 0;
}

static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
				unsigned int total, struct list_head *blist,
				bool is_databuf)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	struct page *page;
	unsigned int num;
	unsigned n;
	__be64 *ptr;

	gfs2_log_lock(sdp);
	list_sort(NULL, blist, blocknr_cmp);
	bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
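	/* Process the list in chunks of at most "limit" blocks: bd1 walks
	 * ahead filling the log descriptor with block numbers, then bd2
	 * revisits the same entries to write out the buffer contents.
	 */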
	while (total) {
		num = total;
		if (total > limit)
			num = limit;
		gfs2_log_unlock(sdp);
		page = gfs2_get_log_desc(sdp,
					 is_databuf ? GFS2_LOG_DESC_JDATA :
					 GFS2_LOG_DESC_METADATA, num + 1, num);
		ld = page_address(page);
		gfs2_log_lock(sdp);
		ptr = (__be64 *)(ld + 1);

		n = 0;
		list_for_each_entry_continue(bd1, blist, bd_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (is_databuf) {
				gfs2_check_magic(bd1->bd_bh);
				*ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 1 : 0);
			}
			if (++n >= num)
				break;
		}

		gfs2_log_unlock(sdp);
		gfs2_log_write_page(sdp, page);
		gfs2_log_lock(sdp);

		n = 0;
		list_for_each_entry_continue(bd2, blist, bd_list) {
			get_bh(bd2->bd_bh);
			gfs2_log_unlock(sdp);
			lock_buffer(bd2->bd_bh);

			if (buffer_escaped(bd2->bd_bh)) {
				void *kaddr;
				page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
				ptr = page_address(page);
				kaddr = kmap_atomic(bd2->bd_bh->b_page);
				memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
				       bd2->bd_bh->b_size);
				kunmap_atomic(kaddr);
				*(__be32 *)ptr = 0;
				clear_buffer_escaped(bd2->bd_bh);
				unlock_buffer(bd2->bd_bh);
				brelse(bd2->bd_bh);
				gfs2_log_write_page(sdp, page);
			} else {
				gfs2_log_write_bh(sdp, bd2->bd_bh);
			}
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}

		BUG_ON(total < num);
		total -= num;
	}
	gfs2_log_unlock(sdp);
}

static void buf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */
	unsigned int nbuf;
	if (tr == NULL)
		return;
	nbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_buf, 0);
}

static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_buf;
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}

static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_blocks = 0;
	jd->jd_replayed_blocks = 0;
}

static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

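		/* A newer revoke means this block must not be replayed. */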
		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else
			mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		jd->jd_replayed_blocks++;
	}

	return error;
}

/**
 * gfs2_meta_sync - Sync all buffers associated with a glock
 * @gl: The glock
 *
 */

static void gfs2_meta_sync(struct gfs2_glock *gl)
{
	struct address_space *mapping = gfs2_glock2aspace(gl);
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	int error;

	if (mapping == NULL)
		mapping = &sdp->sd_aspace;

	filemap_fdatawrite(mapping);
	error = filemap_fdatawait(mapping);

	if (error)
		gfs2_io_error(gl->gl_name.ln_sbd);
}

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
	        jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}

static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct gfs2_meta_header *mh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_le_revoke;
	struct gfs2_bufdata *bd;
	struct page *page;
	unsigned int length;

	gfs2_write_revokes(sdp);
	if (!sdp->sd_log_num_revoke)
		return;

	length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke, sizeof(u64));
	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
	offset = sizeof(struct gfs2_log_descriptor);

	list_for_each_entry(bd, head, bd_list) {
		sdp->sd_log_num_revoke--;

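		/* When the current block fills up, write it out and carry
		 * on in a fresh LB (log block) continuation page.
		 */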
		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
			gfs2_log_write_page(sdp, page);
			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
			mh = page_address(page);
			clear_page(mh);
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
		offset += sizeof(u64);
	}
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	gfs2_log_write_page(sdp, page);
}

static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &sdp->sd_log_le_revoke;
	struct gfs2_bufdata *bd;
	struct gfs2_glock *gl;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gl = bd->bd_gl;
		gfs2_glock_remove_revoke(gl);
		kmem_cache_free(gfs2_bufdata_cachep, bd);
	}
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_revokes = 0;
	jd->jd_replay_tail = head->lh_tail;
}

static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				   struct gfs2_log_descriptor *ld, __be64 *ptr,
				   int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	unsigned int blks = be32_to_cpu(ld->ld_length);
	unsigned int revokes = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh;
	unsigned int offset;
	u64 blkno;
	int first = 1;
	int error;

	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
		return 0;

	offset = sizeof(struct gfs2_log_descriptor);

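	/* The first block begins with a log descriptor; continuation
	 * blocks begin with a meta header, hence the offset reset at the
	 * bottom of the loop.
	 */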
	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;

		if (!first)
			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

			error = gfs2_revoke_add(jd, blkno, start);
			if (error < 0) {
				brelse(bh);
				return error;
			} else if (error)
				jd->jd_found_revokes++;

			if (!--revokes)
				break;
			offset += sizeof(u64);
		}

		brelse(bh);
		offset = sizeof(struct gfs2_meta_header);
		first = 0;
	}

	return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_revoke_clean(jd);
		return;
	}
	if (pass != 1)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
	        jd->jd_jid, jd->jd_found_revokes);

	gfs2_revoke_clean(jd);
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 * @sdp: The filesystem
 * @tr: The system transaction being flushed
 */

static void databuf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = databuf_limit(sdp);
	unsigned int nbuf;
	if (tr == NULL)
		return;
	nbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_databuf, 1);
}

static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	u64 esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);
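	/* Jdata descriptors store (block number, escape flag) pairs, so
	 * two 64-bit words are consumed per replayed block.
	 */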
	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		jd->jd_replayed_blocks++;
	}

	return error;
}

/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	/* data sync? */
	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_databuf;
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}

const struct gfs2_log_operations gfs2_buf_lops = {
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
	.lo_name = "buf",
};

const struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_before_commit = revoke_lo_before_commit,
	.lo_after_commit = revoke_lo_after_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
	.lo_name = "revoke",
};

const struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
	.lo_name = "databuf",
};

const struct gfs2_log_operations *gfs2_log_ops[] = {
	&gfs2_databuf_lops,
	&gfs2_buf_lops,
	&gfs2_revoke_lops,
	NULL,
};