/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"

struct workqueue_struct *gfs2_freeze_wq;

static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	fs_err(gl->gl_name.ln_sbd,
	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
	       "state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_page->mapping, bh->b_page->flags);
	fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm_withdraw(gl->gl_name.ln_sbd, "AIL error\n");
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: the maximum number of revoke entries to add
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
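	/*
	 * Buffers in any of these states should no longer be on the AIL at
	 * this point: they are tolerated (skipped) when called from fsync,
	 * and reported as an AIL error otherwise.
	 */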
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}


static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_trans tr;

	memset(&tr, 0, sizeof(tr));
	INIT_LIST_HEAD(&tr.tr_buf);
	INIT_LIST_HEAD(&tr.tr_databuf);
	INIT_LIST_HEAD(&tr.tr_ail1_list);
	INIT_LIST_HEAD(&tr.tr_ail2_list);
	tr.tr_revokes = atomic_read(&gl->gl_ail_count);

	if (!tr.tr_revokes)
		return;

	/* A shortened, inline version of gfs2_trans_begin()
	 * tr->alloced is not set since the transaction structure is
	 * on the stack */
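	/* One block for the log header, plus enough blocks for the u64 revoke entries */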
	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
	tr.tr_ip = _RET_IP_;
	if (gfs2_log_reserve(sdp, tr.tr_reserved) < 0)
		return;
	WARN_ON_ONCE(current->journal_info);
	current->journal_info = &tr;

	__gfs2_ail_flush(gl, false, tr.tr_revokes);

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_EMPTY_GL);
}

void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
	int ret;

	if (!revokes)
		return;

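	/*
	 * The first log descriptor block holds max_revokes revoke entries.
	 * Each additional revoke block, headed by a struct gfs2_meta_header,
	 * holds another blocksize's worth; grow max_revokes accordingly.
	 */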
	while (revokes > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);

	ret = gfs2_trans_begin(sdp, 0, max_revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, max_revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_FLUSH);
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock. We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to caller to demote/unlock the glock until I/O is complete.
 */

static void rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd;
	int error;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	if (rgd)
		gfs2_rgrp_brelse(rgd);
	spin_unlock(&gl->gl_lockref.lock);

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

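	/*
	 * Flush the log first so pinned metadata reaches the journal, then
	 * write back and wait on the rgrp's pages before emptying the AIL
	 * for this glock.
	 */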
	gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_RGRP_GO_SYNC);
	filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	mapping_set_error(mapping, error);
	gfs2_ail_empty_gl(gl);

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	if (rgd)
		gfs2_free_clones(rgd);
	spin_unlock(&gl->gl_lockref.lock);
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 * We never use LM_ST_DEFERRED with resource groups, so we
 * should always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);

	if (rgd)
		gfs2_rgrp_brelse(rgd);

	WARN_ON_ONCE(!(flags & DIO_METADATA));
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end);

	if (rgd)
		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
}

static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;

	spin_lock(&gl->gl_lockref.lock);
	ip = gl->gl_object;
	if (ip)
		set_bit(GIF_GLOP_PENDING, &ip->i_flags);
	spin_unlock(&gl->gl_lockref.lock);
	return ip;
}

struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	spin_unlock(&gl->gl_lockref.lock);

	return rgd;
}

static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
	if (!ip)
		return;

	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}

/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 *
 */

static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

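	/*
	 * For regular files, tear down any shared writable mappings so that
	 * future writes fault in again under the glock, and wait for any
	 * direct I/O in flight to complete.
	 */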
	if (isreg) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
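	/* Start metadata writeback, overlap data writeback, then wait for both */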
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	gfs2_clear_glop_pending(ip);
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);

	gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count));

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GIF_INVALID, &ip->i_flags);
			forget_all_cached_acls(&ip->i_inode);
			security_inode_invalidate_secctx(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

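	/*
	 * Invalidating the rindex inode makes the cached resource group
	 * index stale: flush the log and force a reread of the rindex.
	 */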
	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_INODE_GO_INVAL);
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);

	gfs2_clear_glop_pending(ip);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

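	/* Keep the glocks of the journal index and resource group index cached */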
	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;

	return 1;
}

static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	const struct gfs2_dinode *str = buf;
	struct timespec64 atime;
	u16 height, depth;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
	ip->i_inode.i_rdev = 0;
	switch (ip->i_inode.i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
					   be32_to_cpu(str->di_minor));
		break;
	}

	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
	set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	if (timespec64_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > sdp->sd_max_height))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (gfs2_is_stuffed(ip) && ip->i_inode.i_size > gfs2_max_stuffed_size(ip))
		goto corrupt;

	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	clear_bit(GIF_INVALID, &ip->i_flags);

	return error;
}

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the holder
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip || (gh->gh_flags & GL_SKIP))
		return 0;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

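	/*
	 * An interrupted truncate left GFS2_DIF_TRUNC_IN_PROG set; hand the
	 * inode to the quota daemon so it can finish the truncate.
	 */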
	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
		spin_lock(&sdp->sd_trunc_lock);
		if (list_empty(&ip->i_trunc_list))
			list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		wake_up(&sdp->sd_quota_wait);
		return 1;
	}

	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: the glock protecting the inode
 *
 */

static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_inode *ip = gl->gl_object;
	if (ip == NULL)
		return;
	gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n",
		       (unsigned long long)ip->i_no_formal_ino,
		       (unsigned long long)ip->i_no_addr,
		       IF2DT(ip->i_inode.i_mode), ip->i_flags,
		       (unsigned int)ip->i_diskflags,
		       (unsigned long long)i_size_read(&ip->i_inode));
}

/**
 * freeze_go_sync - promote/demote the freeze glock
 * @gl: the glock
 *
 */

static void freeze_go_sync(struct gfs2_glock *gl)
{
	int error = 0;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

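	/*
	 * If we still hold the freeze glock in SHARED and our journal is
	 * live, another node is freezing the filesystem: freeze the local
	 * superblock and flush the log with a freeze header.
	 */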
	if (gl->gl_state == LM_ST_SHARED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
		error = freeze_super(sdp->sd_vfs);
		if (error) {
			printk(KERN_INFO "GFS2: couldn't freeze filesystem: %d\n", error);
			gfs2_assert_withdraw(sdp, 0);
		}
		queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
			       GFS2_LFC_FREEZE_GO_SYNC);
	}
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 * @gh: the holder
 *
 */

static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

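	/*
	 * On thaw with a live journal, reread the journal head and verify
	 * that the log was left in a cleanly unmounted state by the freeze.
	 */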
	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (error)
			gfs2_consist(sdp);
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/* Initialize the head of the log */
		if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
	return 0;
}

/**
 * freeze_go_demote_ok
 * @gl: the glock
 *
 * Always returns 0
 */

static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl->gl_lockref.lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (!remote || sb_rdonly(sdp->sd_vfs))
		return;

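	/*
	 * A remote node asking for this iopen glock usually means it is
	 * trying to delete the inode; queue delete work to dispose of our
	 * cached dentry/inode so the delete can proceed.
	 */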
	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gl->gl_lockref.count--;
	}
}

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_lock = gfs2_rgrp_go_lock,
	.go_unlock = gfs2_rgrp_go_unlock,
	.go_dump = gfs2_rgrp_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_sync = freeze_go_sync,
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_demote_ok = freeze_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_flags = GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
	.go_flags = GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB | GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
};

const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};