Lines Matching refs:gl

57 	struct gfs2_glock *gl;		/* current glock struct        */  member
61 typedef void (*glock_examiner) (struct gfs2_glock * gl);
63 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
119 static void wake_up_glock(struct gfs2_glock *gl) in wake_up_glock() argument
121 wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name); in wake_up_glock()
124 __wake_up(wq, TASK_NORMAL, 1, &gl->gl_name); in wake_up_glock()
129 struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu); in gfs2_glock_dealloc() local
131 if (gl->gl_ops->go_flags & GLOF_ASPACE) { in gfs2_glock_dealloc()
132 kmem_cache_free(gfs2_glock_aspace_cachep, gl); in gfs2_glock_dealloc()
134 kfree(gl->gl_lksb.sb_lvbptr); in gfs2_glock_dealloc()
135 kmem_cache_free(gfs2_glock_cachep, gl); in gfs2_glock_dealloc()
139 void gfs2_glock_free(struct gfs2_glock *gl) in gfs2_glock_free() argument
141 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_glock_free()
143 BUG_ON(atomic_read(&gl->gl_revokes)); in gfs2_glock_free()
144 rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms); in gfs2_glock_free()
146 wake_up_glock(gl); in gfs2_glock_free()
147 call_rcu(&gl->gl_rcu, gfs2_glock_dealloc); in gfs2_glock_free()
158 void gfs2_glock_hold(struct gfs2_glock *gl) in gfs2_glock_hold() argument
160 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref)); in gfs2_glock_hold()
161 lockref_get(&gl->gl_lockref); in gfs2_glock_hold()
171 static int demote_ok(const struct gfs2_glock *gl) in demote_ok() argument
173 const struct gfs2_glock_operations *glops = gl->gl_ops; in demote_ok()
175 if (gl->gl_state == LM_ST_UNLOCKED) in demote_ok()
177 if (!list_empty(&gl->gl_holders)) in demote_ok()
180 return glops->go_demote_ok(gl); in demote_ok()
185 void gfs2_glock_add_to_lru(struct gfs2_glock *gl) in gfs2_glock_add_to_lru() argument
187 if (!(gl->gl_ops->go_flags & GLOF_LRU)) in gfs2_glock_add_to_lru()
192 list_del(&gl->gl_lru); in gfs2_glock_add_to_lru()
193 list_add_tail(&gl->gl_lru, &lru_list); in gfs2_glock_add_to_lru()
195 if (!test_bit(GLF_LRU, &gl->gl_flags)) { in gfs2_glock_add_to_lru()
196 set_bit(GLF_LRU, &gl->gl_flags); in gfs2_glock_add_to_lru()
203 static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl) in gfs2_glock_remove_from_lru() argument
205 if (!(gl->gl_ops->go_flags & GLOF_LRU)) in gfs2_glock_remove_from_lru()
209 if (test_bit(GLF_LRU, &gl->gl_flags)) { in gfs2_glock_remove_from_lru()
210 list_del_init(&gl->gl_lru); in gfs2_glock_remove_from_lru()
212 clear_bit(GLF_LRU, &gl->gl_flags); in gfs2_glock_remove_from_lru()
221 static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) { in __gfs2_glock_queue_work() argument
222 if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) { in __gfs2_glock_queue_work()
229 GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2); in __gfs2_glock_queue_work()
230 gl->gl_lockref.count--; in __gfs2_glock_queue_work()
234 static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) { in gfs2_glock_queue_work() argument
235 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_queue_work()
236 __gfs2_glock_queue_work(gl, delay); in gfs2_glock_queue_work()
237 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_queue_work()
240 static void __gfs2_glock_put(struct gfs2_glock *gl) in __gfs2_glock_put() argument
242 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in __gfs2_glock_put()
243 struct address_space *mapping = gfs2_glock2aspace(gl); in __gfs2_glock_put()
245 lockref_mark_dead(&gl->gl_lockref); in __gfs2_glock_put()
247 gfs2_glock_remove_from_lru(gl); in __gfs2_glock_put()
248 spin_unlock(&gl->gl_lockref.lock); in __gfs2_glock_put()
249 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders)); in __gfs2_glock_put()
250 GLOCK_BUG_ON(gl, mapping && mapping->nrpages); in __gfs2_glock_put()
251 trace_gfs2_glock_put(gl); in __gfs2_glock_put()
252 sdp->sd_lockstruct.ls_ops->lm_put_lock(gl); in __gfs2_glock_put()
258 void gfs2_glock_queue_put(struct gfs2_glock *gl) in gfs2_glock_queue_put() argument
260 gfs2_glock_queue_work(gl, 0); in gfs2_glock_queue_put()
269 void gfs2_glock_put(struct gfs2_glock *gl) in gfs2_glock_put() argument
271 if (lockref_put_or_lock(&gl->gl_lockref)) in gfs2_glock_put()
274 __gfs2_glock_put(gl); in gfs2_glock_put()
285 static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh) in may_grant() argument
287 …const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_l… in may_grant()
291 if (gl->gl_state == gh->gh_state) in may_grant()
295 if (gl->gl_state == LM_ST_EXCLUSIVE) { in may_grant()
301 if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY)) in may_grant()
318 static void do_error(struct gfs2_glock *gl, const int ret) in do_error() argument
322 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { in do_error()
345 static int do_promote(struct gfs2_glock *gl) in do_promote() argument
346 __releases(&gl->gl_lockref.lock) in do_promote()
347 __acquires(&gl->gl_lockref.lock) in do_promote()
349 const struct gfs2_glock_operations *glops = gl->gl_ops; in do_promote()
354 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { in do_promote()
357 if (may_grant(gl, gh)) { in do_promote()
358 if (gh->gh_list.prev == &gl->gl_holders && in do_promote()
360 spin_unlock(&gl->gl_lockref.lock); in do_promote()
363 spin_lock(&gl->gl_lockref.lock); in do_promote()
383 if (gh->gh_list.prev == &gl->gl_holders) in do_promote()
385 do_error(gl, 0); in do_promote()
396 static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl) in find_first_waiter() argument
400 list_for_each_entry(gh, &gl->gl_holders, gh_list) { in find_first_waiter()
414 static void state_change(struct gfs2_glock *gl, unsigned int new_state) in state_change() argument
418 held1 = (gl->gl_state != LM_ST_UNLOCKED); in state_change()
422 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref)); in state_change()
424 gl->gl_lockref.count++; in state_change()
426 gl->gl_lockref.count--; in state_change()
428 if (held1 && held2 && list_empty(&gl->gl_holders)) in state_change()
429 clear_bit(GLF_QUEUED, &gl->gl_flags); in state_change()
431 if (new_state != gl->gl_target) in state_change()
433 gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR, in state_change()
435 gl->gl_state = new_state; in state_change()
436 gl->gl_tchange = jiffies; in state_change()
439 static void gfs2_demote_wake(struct gfs2_glock *gl) in gfs2_demote_wake() argument
441 gl->gl_demote_state = LM_ST_EXCLUSIVE; in gfs2_demote_wake()
442 clear_bit(GLF_DEMOTE, &gl->gl_flags); in gfs2_demote_wake()
444 wake_up_bit(&gl->gl_flags, GLF_DEMOTE); in gfs2_demote_wake()
454 static void finish_xmote(struct gfs2_glock *gl, unsigned int ret) in finish_xmote() argument
456 const struct gfs2_glock_operations *glops = gl->gl_ops; in finish_xmote()
461 spin_lock(&gl->gl_lockref.lock); in finish_xmote()
462 trace_gfs2_glock_state_change(gl, state); in finish_xmote()
463 state_change(gl, state); in finish_xmote()
464 gh = find_first_waiter(gl); in finish_xmote()
467 if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) && in finish_xmote()
468 state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED) in finish_xmote()
469 gl->gl_target = LM_ST_UNLOCKED; in finish_xmote()
472 if (unlikely(state != gl->gl_target)) { in finish_xmote()
473 if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) { in finish_xmote()
477 list_move_tail(&gh->gh_list, &gl->gl_holders); in finish_xmote()
478 gh = find_first_waiter(gl); in finish_xmote()
479 gl->gl_target = gh->gh_state; in finish_xmote()
485 gl->gl_target = gl->gl_state; in finish_xmote()
486 do_error(gl, ret); in finish_xmote()
494 do_xmote(gl, gh, gl->gl_target); in finish_xmote()
499 do_xmote(gl, gh, LM_ST_UNLOCKED); in finish_xmote()
502 pr_err("wanted %u got %u\n", gl->gl_target, state); in finish_xmote()
503 GLOCK_BUG_ON(gl, 1); in finish_xmote()
505 spin_unlock(&gl->gl_lockref.lock); in finish_xmote()
510 if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) in finish_xmote()
511 gfs2_demote_wake(gl); in finish_xmote()
514 spin_unlock(&gl->gl_lockref.lock); in finish_xmote()
515 rv = glops->go_xmote_bh(gl, gh); in finish_xmote()
516 spin_lock(&gl->gl_lockref.lock); in finish_xmote()
518 do_error(gl, rv); in finish_xmote()
522 rv = do_promote(gl); in finish_xmote()
527 clear_bit(GLF_LOCK, &gl->gl_flags); in finish_xmote()
529 spin_unlock(&gl->gl_lockref.lock); in finish_xmote()
540 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target) in do_xmote() argument
541 __releases(&gl->gl_lockref.lock) in do_xmote()
542 __acquires(&gl->gl_lockref.lock) in do_xmote()
544 const struct gfs2_glock_operations *glops = gl->gl_ops; in do_xmote()
545 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in do_xmote()
554 GLOCK_BUG_ON(gl, gl->gl_state == target); in do_xmote()
555 GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target); in do_xmote()
558 set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags); in do_xmote()
559 do_error(gl, 0); /* Fail queued try locks */ in do_xmote()
561 gl->gl_req = target; in do_xmote()
562 set_bit(GLF_BLOCKING, &gl->gl_flags); in do_xmote()
563 if ((gl->gl_req == LM_ST_UNLOCKED) || in do_xmote()
564 (gl->gl_state == LM_ST_EXCLUSIVE) || in do_xmote()
566 clear_bit(GLF_BLOCKING, &gl->gl_flags); in do_xmote()
567 spin_unlock(&gl->gl_lockref.lock); in do_xmote()
569 glops->go_sync(gl); in do_xmote()
570 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) in do_xmote()
571 glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA); in do_xmote()
572 clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags); in do_xmote()
574 gfs2_glock_hold(gl); in do_xmote()
577 ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags); in do_xmote()
578 if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED && in do_xmote()
581 finish_xmote(gl, target); in do_xmote()
582 gfs2_glock_queue_work(gl, 0); in do_xmote()
586 GLOCK_BUG_ON(gl, !test_bit(SDF_SHUTDOWN, in do_xmote()
590 finish_xmote(gl, target); in do_xmote()
591 gfs2_glock_queue_work(gl, 0); in do_xmote()
594 spin_lock(&gl->gl_lockref.lock); in do_xmote()
602 static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl) in find_first_holder() argument
606 if (!list_empty(&gl->gl_holders)) { in find_first_holder()
607 gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list); in find_first_holder()
621 static void run_queue(struct gfs2_glock *gl, const int nonblock) in run_queue() argument
622 __releases(&gl->gl_lockref.lock) in run_queue()
623 __acquires(&gl->gl_lockref.lock) in run_queue()
628 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) in run_queue()
631 GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)); in run_queue()
633 if (test_bit(GLF_DEMOTE, &gl->gl_flags) && in run_queue()
634 gl->gl_demote_state != gl->gl_state) { in run_queue()
635 if (find_first_holder(gl)) in run_queue()
639 set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); in run_queue()
640 GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE); in run_queue()
641 gl->gl_target = gl->gl_demote_state; in run_queue()
643 if (test_bit(GLF_DEMOTE, &gl->gl_flags)) in run_queue()
644 gfs2_demote_wake(gl); in run_queue()
645 ret = do_promote(gl); in run_queue()
650 gh = find_first_waiter(gl); in run_queue()
651 gl->gl_target = gh->gh_state; in run_queue()
653 do_error(gl, 0); /* Fail queued try locks */ in run_queue()
655 do_xmote(gl, gh, gl->gl_target); in run_queue()
660 clear_bit(GLF_LOCK, &gl->gl_flags); in run_queue()
662 gl->gl_lockref.count++; in run_queue()
663 __gfs2_glock_queue_work(gl, 0); in run_queue()
667 clear_bit(GLF_LOCK, &gl->gl_flags); in run_queue()
674 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete); in delete_work_func() local
675 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in delete_work_func()
677 u64 no_addr = gl->gl_name.ln_number; in delete_work_func()
682 if (test_bit(GLF_INODE_CREATING, &gl->gl_flags)) in delete_work_func()
691 gfs2_glock_put(gl); in delete_work_func()
697 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work); in glock_work_func() local
700 if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) { in glock_work_func()
701 finish_xmote(gl, gl->gl_reply); in glock_work_func()
704 spin_lock(&gl->gl_lockref.lock); in glock_work_func()
705 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && in glock_work_func()
706 gl->gl_state != LM_ST_UNLOCKED && in glock_work_func()
707 gl->gl_demote_state != LM_ST_EXCLUSIVE) { in glock_work_func()
710 holdtime = gl->gl_tchange + gl->gl_hold_time; in glock_work_func()
715 clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags); in glock_work_func()
716 set_bit(GLF_DEMOTE, &gl->gl_flags); in glock_work_func()
719 run_queue(gl, 0); in glock_work_func()
723 if (gl->gl_name.ln_type != LM_TYPE_INODE) in glock_work_func()
725 __gfs2_glock_queue_work(gl, delay); in glock_work_func()
733 gl->gl_lockref.count -= drop_refs; in glock_work_func()
734 if (!gl->gl_lockref.count) { in glock_work_func()
735 __gfs2_glock_put(gl); in glock_work_func()
738 spin_unlock(&gl->gl_lockref.lock); in glock_work_func()
746 struct gfs2_glock *gl; in find_insert_glock() local
756 gl = rhashtable_lookup_get_insert_fast(&gl_hash_table, in find_insert_glock()
758 if (IS_ERR(gl)) in find_insert_glock()
761 gl = rhashtable_lookup_fast(&gl_hash_table, in find_insert_glock()
764 if (gl && !lockref_get_not_dead(&gl->gl_lockref)) { in find_insert_glock()
772 return gl; in find_insert_glock()
796 struct gfs2_glock *gl, *tmp; in gfs2_glock_get() local
801 gl = find_insert_glock(&name, NULL); in gfs2_glock_get()
802 if (gl) { in gfs2_glock_get()
803 *glp = gl; in gfs2_glock_get()
813 gl = kmem_cache_alloc(cachep, GFP_NOFS); in gfs2_glock_get()
814 if (!gl) in gfs2_glock_get()
817 memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb)); in gfs2_glock_get()
820 gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS); in gfs2_glock_get()
821 if (!gl->gl_lksb.sb_lvbptr) { in gfs2_glock_get()
822 kmem_cache_free(cachep, gl); in gfs2_glock_get()
828 gl->gl_node.next = NULL; in gfs2_glock_get()
829 gl->gl_flags = 0; in gfs2_glock_get()
830 gl->gl_name = name; in gfs2_glock_get()
831 gl->gl_lockref.count = 1; in gfs2_glock_get()
832 gl->gl_state = LM_ST_UNLOCKED; in gfs2_glock_get()
833 gl->gl_target = LM_ST_UNLOCKED; in gfs2_glock_get()
834 gl->gl_demote_state = LM_ST_EXCLUSIVE; in gfs2_glock_get()
835 gl->gl_ops = glops; in gfs2_glock_get()
836 gl->gl_dstamp = 0; in gfs2_glock_get()
839 gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type]; in gfs2_glock_get()
841 gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0; in gfs2_glock_get()
842 gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0; in gfs2_glock_get()
843 gl->gl_tchange = jiffies; in gfs2_glock_get()
844 gl->gl_object = NULL; in gfs2_glock_get()
845 gl->gl_hold_time = GL_GLOCK_DFT_HOLD; in gfs2_glock_get()
846 INIT_DELAYED_WORK(&gl->gl_work, glock_work_func); in gfs2_glock_get()
847 INIT_WORK(&gl->gl_delete, delete_work_func); in gfs2_glock_get()
849 mapping = gfs2_glock2aspace(gl); in gfs2_glock_get()
859 tmp = find_insert_glock(&name, gl); in gfs2_glock_get()
861 *glp = gl; in gfs2_glock_get()
871 kfree(gl->gl_lksb.sb_lvbptr); in gfs2_glock_get()
872 kmem_cache_free(cachep, gl); in gfs2_glock_get()
889 void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags, in gfs2_holder_init() argument
893 gh->gh_gl = gl; in gfs2_holder_init()
900 gfs2_glock_hold(gl); in gfs2_holder_init()
967 static void handle_callback(struct gfs2_glock *gl, unsigned int state, in handle_callback() argument
972 set_bit(bit, &gl->gl_flags); in handle_callback()
973 if (gl->gl_demote_state == LM_ST_EXCLUSIVE) { in handle_callback()
974 gl->gl_demote_state = state; in handle_callback()
975 gl->gl_demote_time = jiffies; in handle_callback()
976 } else if (gl->gl_demote_state != LM_ST_UNLOCKED && in handle_callback()
977 gl->gl_demote_state != state) { in handle_callback()
978 gl->gl_demote_state = LM_ST_UNLOCKED; in handle_callback()
980 if (gl->gl_ops->go_callback) in handle_callback()
981 gl->gl_ops->go_callback(gl, remote); in handle_callback()
982 trace_gfs2_demote_rq(gl, remote); in handle_callback()
1015 __releases(&gl->gl_lockref.lock) in add_to_queue()
1016 __acquires(&gl->gl_lockref.lock) in add_to_queue()
1018 struct gfs2_glock *gl = gh->gh_gl; in add_to_queue() local
1019 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in add_to_queue()
1029 if (test_bit(GLF_LOCK, &gl->gl_flags)) in add_to_queue()
1030 try_futile = !may_grant(gl, gh); in add_to_queue()
1031 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) in add_to_queue()
1035 list_for_each_entry(gh2, &gl->gl_holders, gh_list) { in add_to_queue()
1051 set_bit(GLF_QUEUED, &gl->gl_flags); in add_to_queue()
1053 gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT); in add_to_queue()
1054 gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT); in add_to_queue()
1056 list_add_tail(&gh->gh_list, &gl->gl_holders); in add_to_queue()
1063 gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list); in add_to_queue()
1065 spin_unlock(&gl->gl_lockref.lock); in add_to_queue()
1067 sdp->sd_lockstruct.ls_ops->lm_cancel(gl); in add_to_queue()
1068 spin_lock(&gl->gl_lockref.lock); in add_to_queue()
1081 gfs2_dump_glock(NULL, gl); in add_to_queue()
1096 struct gfs2_glock *gl = gh->gh_gl; in gfs2_glock_nq() local
1097 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_glock_nq()
1103 if (test_bit(GLF_LRU, &gl->gl_flags)) in gfs2_glock_nq()
1104 gfs2_glock_remove_from_lru(gl); in gfs2_glock_nq()
1106 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_nq()
1109 test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) { in gfs2_glock_nq()
1110 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); in gfs2_glock_nq()
1111 gl->gl_lockref.count++; in gfs2_glock_nq()
1112 __gfs2_glock_queue_work(gl, 0); in gfs2_glock_nq()
1114 run_queue(gl, 1); in gfs2_glock_nq()
1115 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_nq()
1143 struct gfs2_glock *gl = gh->gh_gl; in gfs2_glock_dq() local
1144 const struct gfs2_glock_operations *glops = gl->gl_ops; in gfs2_glock_dq()
1148 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_dq()
1150 handle_callback(gl, LM_ST_UNLOCKED, 0, false); in gfs2_glock_dq()
1154 if (find_first_holder(gl) == NULL) { in gfs2_glock_dq()
1156 GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags)); in gfs2_glock_dq()
1157 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_dq()
1159 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_dq()
1160 clear_bit(GLF_LOCK, &gl->gl_flags); in gfs2_glock_dq()
1162 if (list_empty(&gl->gl_holders) && in gfs2_glock_dq()
1163 !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && in gfs2_glock_dq()
1164 !test_bit(GLF_DEMOTE, &gl->gl_flags)) in gfs2_glock_dq()
1167 if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl)) in gfs2_glock_dq()
1168 gfs2_glock_add_to_lru(gl); in gfs2_glock_dq()
1172 gl->gl_lockref.count++; in gfs2_glock_dq()
1173 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && in gfs2_glock_dq()
1174 !test_bit(GLF_DEMOTE, &gl->gl_flags) && in gfs2_glock_dq()
1175 gl->gl_name.ln_type == LM_TYPE_INODE) in gfs2_glock_dq()
1176 delay = gl->gl_hold_time; in gfs2_glock_dq()
1177 __gfs2_glock_queue_work(gl, delay); in gfs2_glock_dq()
1179 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_dq()
1184 struct gfs2_glock *gl = gh->gh_gl; in gfs2_glock_dq_wait() local
1187 wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE); in gfs2_glock_dq_wait()
1218 struct gfs2_glock *gl; in gfs2_glock_nq_num() local
1221 error = gfs2_glock_get(sdp, number, glops, CREATE, &gl); in gfs2_glock_nq_num()
1223 error = gfs2_glock_nq_init(gl, state, flags, gh); in gfs2_glock_nq_num()
1224 gfs2_glock_put(gl); in gfs2_glock_nq_num()
1338 void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state) in gfs2_glock_cb() argument
1344 gfs2_glock_hold(gl); in gfs2_glock_cb()
1345 holdtime = gl->gl_tchange + gl->gl_hold_time; in gfs2_glock_cb()
1346 if (test_bit(GLF_QUEUED, &gl->gl_flags) && in gfs2_glock_cb()
1347 gl->gl_name.ln_type == LM_TYPE_INODE) { in gfs2_glock_cb()
1350 if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags)) in gfs2_glock_cb()
1351 delay = gl->gl_hold_time; in gfs2_glock_cb()
1354 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_cb()
1355 handle_callback(gl, state, delay, true); in gfs2_glock_cb()
1356 __gfs2_glock_queue_work(gl, delay); in gfs2_glock_cb()
1357 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_cb()
1371 static int gfs2_should_freeze(const struct gfs2_glock *gl) in gfs2_should_freeze() argument
1375 if (gl->gl_reply & ~LM_OUT_ST_MASK) in gfs2_should_freeze()
1377 if (gl->gl_target == LM_ST_UNLOCKED) in gfs2_should_freeze()
1380 list_for_each_entry(gh, &gl->gl_holders, gh_list) { in gfs2_should_freeze()
1399 void gfs2_glock_complete(struct gfs2_glock *gl, int ret) in gfs2_glock_complete() argument
1401 struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; in gfs2_glock_complete()
1403 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_complete()
1404 gl->gl_reply = ret; in gfs2_glock_complete()
1407 if (gfs2_should_freeze(gl)) { in gfs2_glock_complete()
1408 set_bit(GLF_FROZEN, &gl->gl_flags); in gfs2_glock_complete()
1409 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_complete()
1414 gl->gl_lockref.count++; in gfs2_glock_complete()
1415 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); in gfs2_glock_complete()
1416 __gfs2_glock_queue_work(gl, 0); in gfs2_glock_complete()
1417 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_complete()
1453 struct gfs2_glock *gl; in gfs2_dispose_glock_lru() local
1458 gl = list_entry(list->next, struct gfs2_glock, gl_lru); in gfs2_dispose_glock_lru()
1459 list_del_init(&gl->gl_lru); in gfs2_dispose_glock_lru()
1460 clear_bit(GLF_LRU, &gl->gl_flags); in gfs2_dispose_glock_lru()
1461 if (!spin_trylock(&gl->gl_lockref.lock)) { in gfs2_dispose_glock_lru()
1463 list_add(&gl->gl_lru, &lru_list); in gfs2_dispose_glock_lru()
1464 set_bit(GLF_LRU, &gl->gl_flags); in gfs2_dispose_glock_lru()
1468 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { in gfs2_dispose_glock_lru()
1469 spin_unlock(&gl->gl_lockref.lock); in gfs2_dispose_glock_lru()
1472 gl->gl_lockref.count++; in gfs2_dispose_glock_lru()
1473 if (demote_ok(gl)) in gfs2_dispose_glock_lru()
1474 handle_callback(gl, LM_ST_UNLOCKED, 0, false); in gfs2_dispose_glock_lru()
1475 WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags)); in gfs2_dispose_glock_lru()
1476 __gfs2_glock_queue_work(gl, 0); in gfs2_dispose_glock_lru()
1477 spin_unlock(&gl->gl_lockref.lock); in gfs2_dispose_glock_lru()
1493 struct gfs2_glock *gl; in gfs2_scan_glock_lru() local
1500 gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru); in gfs2_scan_glock_lru()
1503 if (!test_bit(GLF_LOCK, &gl->gl_flags)) { in gfs2_scan_glock_lru()
1504 list_move(&gl->gl_lru, &dispose); in gfs2_scan_glock_lru()
1510 list_move(&gl->gl_lru, &skipped); in gfs2_scan_glock_lru()
1553 struct gfs2_glock *gl; in glock_hash_walk() local
1561 while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl)) in glock_hash_walk()
1562 if (gl->gl_name.ln_sbd == sdp && in glock_hash_walk()
1563 lockref_get_not_dead(&gl->gl_lockref)) in glock_hash_walk()
1564 examiner(gl); in glock_hash_walk()
1567 } while (cond_resched(), gl == ERR_PTR(-EAGAIN)); in glock_hash_walk()
1578 static void thaw_glock(struct gfs2_glock *gl) in thaw_glock() argument
1580 if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) { in thaw_glock()
1581 gfs2_glock_put(gl); in thaw_glock()
1584 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); in thaw_glock()
1585 gfs2_glock_queue_work(gl, 0); in thaw_glock()
1594 static void clear_glock(struct gfs2_glock *gl) in clear_glock() argument
1596 gfs2_glock_remove_from_lru(gl); in clear_glock()
1598 spin_lock(&gl->gl_lockref.lock); in clear_glock()
1599 if (gl->gl_state != LM_ST_UNLOCKED) in clear_glock()
1600 handle_callback(gl, LM_ST_UNLOCKED, 0, false); in clear_glock()
1601 __gfs2_glock_queue_work(gl, 0); in clear_glock()
1602 spin_unlock(&gl->gl_lockref.lock); in clear_glock()
1616 static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl) in dump_glock() argument
1618 spin_lock(&gl->gl_lockref.lock); in dump_glock()
1619 gfs2_dump_glock(seq, gl); in dump_glock()
1620 spin_unlock(&gl->gl_lockref.lock); in dump_glock()
1623 static void dump_glock_func(struct gfs2_glock *gl) in dump_glock_func() argument
1625 dump_glock(NULL, gl); in dump_glock_func()
1650 struct gfs2_glock *gl = ip->i_gl; in gfs2_glock_finish_truncate() local
1654 gfs2_assert_withdraw(gl->gl_name.ln_sbd, ret == 0); in gfs2_glock_finish_truncate()
1656 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_finish_truncate()
1657 clear_bit(GLF_LOCK, &gl->gl_flags); in gfs2_glock_finish_truncate()
1658 run_queue(gl, 1); in gfs2_glock_finish_truncate()
1659 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_finish_truncate()
1731 static const char *gflags2str(char *buf, const struct gfs2_glock *gl) in gflags2str() argument
1733 const unsigned long *gflags = &gl->gl_flags; in gflags2str()
1760 if (gl->gl_object) in gflags2str()
1785 void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl) in gfs2_dump_glock() argument
1787 const struct gfs2_glock_operations *glops = gl->gl_ops; in gfs2_dump_glock()
1792 dtime = jiffies - gl->gl_demote_time; in gfs2_dump_glock()
1794 if (!test_bit(GLF_DEMOTE, &gl->gl_flags)) in gfs2_dump_glock()
1797 state2str(gl->gl_state), in gfs2_dump_glock()
1798 gl->gl_name.ln_type, in gfs2_dump_glock()
1799 (unsigned long long)gl->gl_name.ln_number, in gfs2_dump_glock()
1800 gflags2str(gflags_buf, gl), in gfs2_dump_glock()
1801 state2str(gl->gl_target), in gfs2_dump_glock()
1802 state2str(gl->gl_demote_state), dtime, in gfs2_dump_glock()
1803 atomic_read(&gl->gl_ail_count), in gfs2_dump_glock()
1804 atomic_read(&gl->gl_revokes), in gfs2_dump_glock()
1805 (int)gl->gl_lockref.count, gl->gl_hold_time); in gfs2_dump_glock()
1807 list_for_each_entry(gh, &gl->gl_holders, gh_list) in gfs2_dump_glock()
1810 if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump) in gfs2_dump_glock()
1811 glops->go_dump(seq, gl); in gfs2_dump_glock()
1816 struct gfs2_glock *gl = iter_ptr; in gfs2_glstats_seq_show() local
1819 gl->gl_name.ln_type, in gfs2_glstats_seq_show()
1820 (unsigned long long)gl->gl_name.ln_number, in gfs2_glstats_seq_show()
1821 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT], in gfs2_glstats_seq_show()
1822 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR], in gfs2_glstats_seq_show()
1823 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB], in gfs2_glstats_seq_show()
1824 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB], in gfs2_glstats_seq_show()
1825 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT], in gfs2_glstats_seq_show()
1826 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR], in gfs2_glstats_seq_show()
1827 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT], in gfs2_glstats_seq_show()
1828 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]); in gfs2_glstats_seq_show()
1933 struct gfs2_glock *gl = gi->gl; in gfs2_glock_iter_next() local
1935 if (gl) { in gfs2_glock_iter_next()
1938 if (!lockref_put_not_zero(&gl->gl_lockref)) in gfs2_glock_iter_next()
1939 gfs2_glock_queue_put(gl); in gfs2_glock_iter_next()
1942 gl = rhashtable_walk_next(&gi->hti); in gfs2_glock_iter_next()
1943 if (IS_ERR_OR_NULL(gl)) { in gfs2_glock_iter_next()
1944 if (gl == ERR_PTR(-EAGAIN)) { in gfs2_glock_iter_next()
1948 gl = NULL; in gfs2_glock_iter_next()
1951 if (gl->gl_name.ln_sbd != gi->sdp) in gfs2_glock_iter_next()
1954 if (!lockref_get_not_dead(&gl->gl_lockref)) in gfs2_glock_iter_next()
1958 if (__lockref_is_dead(&gl->gl_lockref)) in gfs2_glock_iter_next()
1963 gi->gl = gl; in gfs2_glock_iter_next()
1988 return gi->gl; in gfs2_glock_seq_start()
1999 return gi->gl; in gfs2_glock_seq_next()
2078 gi->gl = NULL; in __gfs2_glocks_open()
2094 if (gi->gl) in gfs2_glocks_release()
2095 gfs2_glock_put(gi->gl); in gfs2_glocks_release()
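
As a rough orientation only: the matches above span the glock lifecycle — lookup/creation (gfs2_glock_get), reference counting (gfs2_glock_hold, gfs2_glock_put), holder queueing (gfs2_holder_init, gfs2_glock_nq, gfs2_glock_dq), demote callbacks (handle_callback, gfs2_glock_cb) and LRU/shrinker handling. The following is a hypothetical caller-side sketch, not part of the listing and assuming ordinary kernel context with a valid struct gfs2_inode *ip; it shows how the holder API referenced here (gfs2_glock_nq_init at line 1223, gfs2_glock_dq at line 1143) is typically used:

	struct gfs2_holder gh;
	int error;

	/* gfs2_glock_nq_init() wraps gfs2_holder_init() + gfs2_glock_nq();
	 * the holder takes its own reference on the glock. */
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	if (error)
		return error;

	/* ... the inode glock is held in shared mode here ... */

	/* Dequeue the holder and drop its reference on the glock. */
	gfs2_glock_dq_uninit(&gh);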