Lines Matching refs:gl — identifier cross-reference for the glock pointer `gl` in fs/gfs2/glock.c. Each entry shows the source line number, the matching code, and the enclosing function; declaration sites are tagged member, argument, or local.
54 struct gfs2_glock *gl; /* current glock struct */ member
58 typedef void (*glock_examiner) (struct gfs2_glock * gl);
60 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
116 static void wake_up_glock(struct gfs2_glock *gl) in wake_up_glock() argument
118 wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name); in wake_up_glock()
121 __wake_up(wq, TASK_NORMAL, 1, &gl->gl_name); in wake_up_glock()
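
The three hits above cover nearly all of wake_up_glock(); only the guard at line 120 contains no `gl` and is therefore absent from the listing. A sketch of the assembled function, with that guard filled in as an assumption from upstream glock.c:

static void wake_up_glock(struct gfs2_glock *gl)
{
        wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name);

        if (waitqueue_active(wq))       /* assumed: line 120 is not in the listing */
                __wake_up(wq, TASK_NORMAL, 1, &gl->gl_name);
}
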
126 struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu); in gfs2_glock_dealloc() local
128 kfree(gl->gl_lksb.sb_lvbptr); in gfs2_glock_dealloc()
129 if (gl->gl_ops->go_flags & GLOF_ASPACE) in gfs2_glock_dealloc()
130 kmem_cache_free(gfs2_glock_aspace_cachep, gl); in gfs2_glock_dealloc()
132 kmem_cache_free(gfs2_glock_cachep, gl); in gfs2_glock_dealloc()
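 
The hits at lines 126-132 give the whole body of the RCU callback except the `else` between lines 130 and 132, which the control flow forces. Assembled:

static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
        struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);

        kfree(gl->gl_lksb.sb_lvbptr);
        if (gl->gl_ops->go_flags & GLOF_ASPACE)
                kmem_cache_free(gfs2_glock_aspace_cachep, gl);
        else    /* inferred: non-aspace glocks come from the plain cache */
                kmem_cache_free(gfs2_glock_cachep, gl);
}
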
148 static bool glock_blocked_by_withdraw(struct gfs2_glock *gl) in glock_blocked_by_withdraw() argument
150 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in glock_blocked_by_withdraw()
154 if (gl->gl_ops->go_flags & GLOF_NONDISK) in glock_blocked_by_withdraw()
157 gl->gl_name.ln_number == sdp->sd_jdesc->jd_no_addr) in glock_blocked_by_withdraw()
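
Only four lines of glock_blocked_by_withdraw() reference `gl`, so the listing shows just the skeleton. A sketch of the full predicate, with the unmatched withdraw test and return statements filled in as assumptions from upstream:

static bool glock_blocked_by_withdraw(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

        if (likely(!gfs2_withdrawn(sdp)))               /* assumed early-out */
                return false;
        if (gl->gl_ops->go_flags & GLOF_NONDISK)        /* non-disk glocks never block */
                return false;
        if (!sdp->sd_jdesc ||                           /* assumed first half of the || */
            gl->gl_name.ln_number == sdp->sd_jdesc->jd_no_addr)
                return false;                           /* our own journal stays usable */
        return true;
}
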
162 void gfs2_glock_free(struct gfs2_glock *gl) in gfs2_glock_free() argument
164 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_glock_free()
166 gfs2_glock_assert_withdraw(gl, atomic_read(&gl->gl_revokes) == 0); in gfs2_glock_free()
167 rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms); in gfs2_glock_free()
169 wake_up_glock(gl); in gfs2_glock_free()
170 call_rcu(&gl->gl_rcu, gfs2_glock_dealloc); in gfs2_glock_free()
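
gfs2_glock_free() unhashes the glock, wakes any find_insert_glock() waiters, and defers the actual free to the RCU callback above. The memory barrier and the sd_glock_disposal accounting reference no `gl` and are assumptions from upstream:

void gfs2_glock_free(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

        gfs2_glock_assert_withdraw(gl, atomic_read(&gl->gl_revokes) == 0);
        rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
        smp_mb();                                       /* assumed: unhash before wakeup */
        wake_up_glock(gl);
        call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
        if (atomic_dec_and_test(&sdp->sd_glock_disposal))       /* assumed accounting */
                wake_up(&sdp->sd_glock_wait);
}
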
181 void gfs2_glock_hold(struct gfs2_glock *gl) in gfs2_glock_hold() argument
183 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref)); in gfs2_glock_hold()
184 lockref_get(&gl->gl_lockref); in gfs2_glock_hold()
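
Both body lines of gfs2_glock_hold() appear above, so the function reconstructs exactly: assert the lockref is still live, then take a reference. Callers pair it with gfs2_glock_put().

void gfs2_glock_hold(struct gfs2_glock *gl)
{
        GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
        lockref_get(&gl->gl_lockref);
}
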
194 static int demote_ok(const struct gfs2_glock *gl) in demote_ok() argument
196 const struct gfs2_glock_operations *glops = gl->gl_ops; in demote_ok()
198 if (gl->gl_state == LM_ST_UNLOCKED) in demote_ok()
200 if (!list_empty(&gl->gl_holders)) in demote_ok()
203 return glops->go_demote_ok(gl); in demote_ok()
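
demote_ok() reconstructs to two early-outs plus the per-type hook; the return statements and the NULL check of go_demote_ok (lines without `gl`) are assumptions:

static int demote_ok(const struct gfs2_glock *gl)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;

        if (gl->gl_state == LM_ST_UNLOCKED)
                return 0;       /* assumed: already unlocked, nothing to demote */
        if (!list_empty(&gl->gl_holders))
                return 0;       /* assumed: active holders forbid demotion */
        if (glops->go_demote_ok)        /* assumed NULL check */
                return glops->go_demote_ok(gl);
        return 1;
}
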
208 void gfs2_glock_add_to_lru(struct gfs2_glock *gl) in gfs2_glock_add_to_lru() argument
210 if (!(gl->gl_ops->go_flags & GLOF_LRU)) in gfs2_glock_add_to_lru()
215 list_move_tail(&gl->gl_lru, &lru_list); in gfs2_glock_add_to_lru()
217 if (!test_bit(GLF_LRU, &gl->gl_flags)) { in gfs2_glock_add_to_lru()
218 set_bit(GLF_LRU, &gl->gl_flags); in gfs2_glock_add_to_lru()
225 static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl) in gfs2_glock_remove_from_lru() argument
227 if (!(gl->gl_ops->go_flags & GLOF_LRU)) in gfs2_glock_remove_from_lru()
231 if (test_bit(GLF_LRU, &gl->gl_flags)) { in gfs2_glock_remove_from_lru()
232 list_del_init(&gl->gl_lru); in gfs2_glock_remove_from_lru()
234 clear_bit(GLF_LRU, &gl->gl_flags); in gfs2_glock_remove_from_lru()
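
The two LRU helpers are symmetric: both bail out for glock types without GLOF_LRU, then move the glock on or off the global lru_list while keeping the GLF_LRU bit and the LRU count in sync. The lock/unlock and counter lines carry no `gl` and are assumptions:

void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
        if (!(gl->gl_ops->go_flags & GLOF_LRU))
                return;

        spin_lock(&lru_lock);                   /* assumed global LRU lock */
        list_move_tail(&gl->gl_lru, &lru_list);
        if (!test_bit(GLF_LRU, &gl->gl_flags)) {
                set_bit(GLF_LRU, &gl->gl_flags);
                atomic_inc(&lru_count);         /* assumed accounting */
        }
        spin_unlock(&lru_lock);
}

static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
        if (!(gl->gl_ops->go_flags & GLOF_LRU))
                return;

        spin_lock(&lru_lock);                   /* assumed */
        if (test_bit(GLF_LRU, &gl->gl_flags)) {
                list_del_init(&gl->gl_lru);
                atomic_dec(&lru_count);         /* assumed */
                clear_bit(GLF_LRU, &gl->gl_flags);
        }
        spin_unlock(&lru_lock);
}
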
243 static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) { in __gfs2_glock_queue_work() argument
244 if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) { in __gfs2_glock_queue_work()
251 GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2); in __gfs2_glock_queue_work()
252 gl->gl_lockref.count--; in __gfs2_glock_queue_work()
256 static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) { in gfs2_glock_queue_work() argument
257 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_queue_work()
258 __gfs2_glock_queue_work(gl, delay); in gfs2_glock_queue_work()
259 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_queue_work()
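
These two helpers appear above nearly verbatim (the opening braces on the signature lines are a quirk of the source). The double-underscore variant requires gl_lockref.lock to be held: if queue_delayed_work() reports the work already queued, the reference meant to ride along with the work item is dropped again. The explanatory comment below is paraphrased, not verbatim:

static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
        if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) {
                /* Work already queued: give back the extra reference. */
                GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2);
                gl->gl_lockref.count--;
        }
}

static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
        spin_lock(&gl->gl_lockref.lock);
        __gfs2_glock_queue_work(gl, delay);
        spin_unlock(&gl->gl_lockref.lock);
}
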
262 static void __gfs2_glock_put(struct gfs2_glock *gl) in __gfs2_glock_put() argument
264 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in __gfs2_glock_put()
265 struct address_space *mapping = gfs2_glock2aspace(gl); in __gfs2_glock_put()
267 lockref_mark_dead(&gl->gl_lockref); in __gfs2_glock_put()
269 gfs2_glock_remove_from_lru(gl); in __gfs2_glock_put()
270 spin_unlock(&gl->gl_lockref.lock); in __gfs2_glock_put()
271 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders)); in __gfs2_glock_put()
275 GLOCK_BUG_ON(gl, !mapping_empty(mapping)); in __gfs2_glock_put()
277 trace_gfs2_glock_put(gl); in __gfs2_glock_put()
278 sdp->sd_lockstruct.ls_ops->lm_put_lock(gl); in __gfs2_glock_put()
284 void gfs2_glock_queue_put(struct gfs2_glock *gl) in gfs2_glock_queue_put() argument
286 gfs2_glock_queue_work(gl, 0); in gfs2_glock_queue_put()
295 void gfs2_glock_put(struct gfs2_glock *gl) in gfs2_glock_put() argument
297 if (lockref_put_or_lock(&gl->gl_lockref)) in gfs2_glock_put()
300 __gfs2_glock_put(gl); in gfs2_glock_put()
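
gfs2_glock_put() is the fast-path wrapper: lockref_put_or_lock() drops the count unless it would reach zero, in which case it returns with gl_lockref.lock held and __gfs2_glock_put() (lines 262-278 above) finishes teardown. Only the early return between lines 297 and 300 is assumed:

void gfs2_glock_put(struct gfs2_glock *gl)
{
        if (lockref_put_or_lock(&gl->gl_lockref))
                return;         /* assumed: count didn't hit zero */

        __gfs2_glock_put(gl);   /* runs with gl_lockref.lock held */
}
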
311 static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh) in may_grant() argument
313 const struct gfs2_holder *gh_head = list_first_entry(&gl->gl_holders, const struct gfs2_holder, gh_list); in may_grant()
331 if (gl->gl_state == gh->gh_state) in may_grant()
335 if (gl->gl_state == LM_ST_EXCLUSIVE) { in may_grant()
341 if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY)) in may_grant()
364 static void do_error(struct gfs2_glock *gl, const int ret) in do_error() argument
368 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { in do_error()
391 static int do_promote(struct gfs2_glock *gl) in do_promote() argument
392 __releases(&gl->gl_lockref.lock) in do_promote()
393 __acquires(&gl->gl_lockref.lock) in do_promote()
395 const struct gfs2_glock_operations *glops = gl->gl_ops; in do_promote()
400 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { in do_promote()
403 if (may_grant(gl, gh)) { in do_promote()
404 if (gh->gh_list.prev == &gl->gl_holders && in do_promote()
406 spin_unlock(&gl->gl_lockref.lock); in do_promote()
409 spin_lock(&gl->gl_lockref.lock); in do_promote()
429 if (gh->gh_list.prev == &gl->gl_holders) in do_promote()
431 do_error(gl, 0); in do_promote()
442 static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl) in find_first_waiter() argument
446 list_for_each_entry(gh, &gl->gl_holders, gh_list) { in find_first_waiter()
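
find_first_waiter() scans the holders list for the first entry that has not yet been granted; the HIF_HOLDER test and the NULL fallback contain no `gl` and are assumptions:

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;

        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                if (!test_bit(HIF_HOLDER, &gh->gh_iflags))      /* assumed */
                        return gh;
        }
        return NULL;
}
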
459 static void state_change(struct gfs2_glock *gl, unsigned int new_state) in state_change() argument
463 held1 = (gl->gl_state != LM_ST_UNLOCKED); in state_change()
467 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref)); in state_change()
469 gl->gl_lockref.count++; in state_change()
471 gl->gl_lockref.count--; in state_change()
473 if (new_state != gl->gl_target) in state_change()
475 gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR, in state_change()
477 gl->gl_state = new_state; in state_change()
478 gl->gl_tchange = jiffies; in state_change()
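
state_change() reconstructs as follows: a glock held in any locked state pins one lockref count, so crossing into or out of LM_ST_UNLOCKED adjusts the count; landing in an unexpected state also shortens the minimum hold time. The held2 computation, branch structure, and GL_GLOCK_MIN_HOLD floor are assumptions:

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
        int held1, held2;

        held1 = (gl->gl_state != LM_ST_UNLOCKED);
        held2 = (new_state != LM_ST_UNLOCKED);          /* assumed */

        if (held1 != held2) {                           /* assumed */
                GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
                if (held2)
                        gl->gl_lockref.count++;
                else
                        gl->gl_lockref.count--;
        }
        if (new_state != gl->gl_target)
                gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
                                       GL_GLOCK_MIN_HOLD);      /* assumed floor */
        gl->gl_state = new_state;
        gl->gl_tchange = jiffies;
}
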
481 static void gfs2_set_demote(struct gfs2_glock *gl) in gfs2_set_demote() argument
483 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_set_demote()
485 set_bit(GLF_DEMOTE, &gl->gl_flags); in gfs2_set_demote()
490 static void gfs2_demote_wake(struct gfs2_glock *gl) in gfs2_demote_wake() argument
492 gl->gl_demote_state = LM_ST_EXCLUSIVE; in gfs2_demote_wake()
493 clear_bit(GLF_DEMOTE, &gl->gl_flags); in gfs2_demote_wake()
495 wake_up_bit(&gl->gl_flags, GLF_DEMOTE); in gfs2_demote_wake()
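
gfs2_set_demote() raises GLF_DEMOTE and wakes the superblock-wide waiter; gfs2_demote_wake() clears it and resets gl_demote_state to the LM_ST_EXCLUSIVE sentinel. The memory barriers and the first wakeup target are assumptions for the lines the search did not match:

static void gfs2_set_demote(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

        set_bit(GLF_DEMOTE, &gl->gl_flags);
        smp_mb();                               /* assumed */
        wake_up(&sdp->sd_async_glock_wait);     /* assumed wait queue */
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
        gl->gl_demote_state = LM_ST_EXCLUSIVE;
        clear_bit(GLF_DEMOTE, &gl->gl_flags);
        smp_mb__after_atomic();                 /* assumed, before the wakeup */
        wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}
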
505 static void finish_xmote(struct gfs2_glock *gl, unsigned int ret) in finish_xmote() argument
507 const struct gfs2_glock_operations *glops = gl->gl_ops; in finish_xmote()
512 spin_lock(&gl->gl_lockref.lock); in finish_xmote()
513 trace_gfs2_glock_state_change(gl, state); in finish_xmote()
514 state_change(gl, state); in finish_xmote()
515 gh = find_first_waiter(gl); in finish_xmote()
518 if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) && in finish_xmote()
519 state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED) in finish_xmote()
520 gl->gl_target = LM_ST_UNLOCKED; in finish_xmote()
523 if (unlikely(state != gl->gl_target)) { in finish_xmote()
524 if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) { in finish_xmote()
528 list_move_tail(&gh->gh_list, &gl->gl_holders); in finish_xmote()
529 gh = find_first_waiter(gl); in finish_xmote()
530 gl->gl_target = gh->gh_state; in finish_xmote()
536 gl->gl_target = gl->gl_state; in finish_xmote()
537 do_error(gl, ret); in finish_xmote()
545 do_xmote(gl, gh, gl->gl_target); in finish_xmote()
550 do_xmote(gl, gh, LM_ST_UNLOCKED); in finish_xmote()
553 fs_err(gl->gl_name.ln_sbd, "wanted %u got %u\n", in finish_xmote()
554 gl->gl_target, state); in finish_xmote()
555 GLOCK_BUG_ON(gl, 1); in finish_xmote()
557 spin_unlock(&gl->gl_lockref.lock); in finish_xmote()
562 if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) in finish_xmote()
563 gfs2_demote_wake(gl); in finish_xmote()
566 spin_unlock(&gl->gl_lockref.lock); in finish_xmote()
567 rv = glops->go_xmote_bh(gl); in finish_xmote()
568 spin_lock(&gl->gl_lockref.lock); in finish_xmote()
570 do_error(gl, rv); in finish_xmote()
574 rv = do_promote(gl); in finish_xmote()
579 clear_bit(GLF_LOCK, &gl->gl_flags); in finish_xmote()
581 spin_unlock(&gl->gl_lockref.lock); in finish_xmote()
584 static bool is_system_glock(struct gfs2_glock *gl) in is_system_glock() argument
586 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in is_system_glock()
589 if (gl == m_ip->i_gl) in is_system_glock()
602 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target) in do_xmote() argument
603 __releases(&gl->gl_lockref.lock) in do_xmote()
604 __acquires(&gl->gl_lockref.lock) in do_xmote()
606 const struct gfs2_glock_operations *glops = gl->gl_ops; in do_xmote()
607 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in do_xmote()
611 if (target != LM_ST_UNLOCKED && glock_blocked_by_withdraw(gl) && in do_xmote()
616 GLOCK_BUG_ON(gl, gl->gl_state == target); in do_xmote()
617 GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target); in do_xmote()
626 &gl->gl_flags)) in do_xmote()
628 do_error(gl, 0); /* Fail queued try locks */ in do_xmote()
630 gl->gl_req = target; in do_xmote()
631 set_bit(GLF_BLOCKING, &gl->gl_flags); in do_xmote()
632 if ((gl->gl_req == LM_ST_UNLOCKED) || in do_xmote()
633 (gl->gl_state == LM_ST_EXCLUSIVE) || in do_xmote()
635 clear_bit(GLF_BLOCKING, &gl->gl_flags); in do_xmote()
636 spin_unlock(&gl->gl_lockref.lock); in do_xmote()
638 ret = glops->go_sync(gl); in do_xmote()
646 gfs2_dump_glock(NULL, gl, true); in do_xmote()
651 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) { in do_xmote()
659 if ((atomic_read(&gl->gl_ail_count) != 0) && in do_xmote()
661 gfs2_glock_assert_warn(gl, in do_xmote()
662 !atomic_read(&gl->gl_ail_count)); in do_xmote()
663 gfs2_dump_glock(NULL, gl, true); in do_xmote()
665 glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA); in do_xmote()
666 clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags); in do_xmote()
670 gfs2_glock_hold(gl); in do_xmote()
694 if (glock_blocked_by_withdraw(gl) && in do_xmote()
697 if (!is_system_glock(gl)) { in do_xmote()
698 gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD); in do_xmote()
701 clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags); in do_xmote()
707 ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags); in do_xmote()
708 if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED && in do_xmote()
711 finish_xmote(gl, target); in do_xmote()
712 gfs2_glock_queue_work(gl, 0); in do_xmote()
715 GLOCK_BUG_ON(gl, !gfs2_withdrawn(sdp)); in do_xmote()
718 finish_xmote(gl, target); in do_xmote()
719 gfs2_glock_queue_work(gl, 0); in do_xmote()
722 spin_lock(&gl->gl_lockref.lock); in do_xmote()
730 static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl) in find_first_holder() argument
734 if (!list_empty(&gl->gl_holders)) { in find_first_holder()
735 gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list); in find_first_holder()
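
find_first_holder() is the counterpart of find_first_waiter() above: it returns the head of the holders list only if that entry has actually been granted. The HIF_HOLDER test and NULL fallback are assumptions:

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;

        if (!list_empty(&gl->gl_holders)) {
                gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list);
                if (test_bit(HIF_HOLDER, &gh->gh_iflags))       /* assumed */
                        return gh;
        }
        return NULL;
}
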
749 static void run_queue(struct gfs2_glock *gl, const int nonblock) in run_queue() argument
750 __releases(&gl->gl_lockref.lock) in run_queue()
751 __acquires(&gl->gl_lockref.lock) in run_queue()
756 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) in run_queue()
759 GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)); in run_queue()
761 if (test_bit(GLF_DEMOTE, &gl->gl_flags) && in run_queue()
762 gl->gl_demote_state != gl->gl_state) { in run_queue()
763 if (find_first_holder(gl)) in run_queue()
767 set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); in run_queue()
768 GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE); in run_queue()
769 gl->gl_target = gl->gl_demote_state; in run_queue()
771 if (test_bit(GLF_DEMOTE, &gl->gl_flags)) in run_queue()
772 gfs2_demote_wake(gl); in run_queue()
773 ret = do_promote(gl); in run_queue()
778 gh = find_first_waiter(gl); in run_queue()
779 gl->gl_target = gh->gh_state; in run_queue()
781 do_error(gl, 0); /* Fail queued try locks */ in run_queue()
783 do_xmote(gl, gh, gl->gl_target); in run_queue()
788 clear_bit(GLF_LOCK, &gl->gl_flags); in run_queue()
790 gl->gl_lockref.count++; in run_queue()
791 __gfs2_glock_queue_work(gl, 0); in run_queue()
795 clear_bit(GLF_LOCK, &gl->gl_flags); in run_queue()
800 void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation) in gfs2_inode_remember_delete() argument
802 struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr; in gfs2_inode_remember_delete()
810 bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation) in gfs2_inode_already_deleted() argument
812 struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr; in gfs2_inode_already_deleted()
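
Both functions cast the DLM lock value block to struct gfs2_inode_lvb; the remaining lines touch only `ri` and so are missing from the listing. A sketch assuming the upstream LVB layout (the ri_magic and ri_generation_deleted fields):

void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation)
{
        struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr;

        ri->ri_magic = cpu_to_be32(GFS2_MAGIC);                 /* assumed */
        ri->ri_generation_deleted = cpu_to_be64(generation);    /* assumed */
}

bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation)
{
        struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr;

        if (ri->ri_magic != cpu_to_be32(GFS2_MAGIC))            /* assumed */
                return false;
        return generation <= be64_to_cpu(ri->ri_generation_deleted);
}
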
819 static void gfs2_glock_poke(struct gfs2_glock *gl) in gfs2_glock_poke() argument
825 gfs2_holder_init(gl, LM_ST_SHARED, flags, &gh); in gfs2_glock_poke()
832 static bool gfs2_try_evict(struct gfs2_glock *gl) in gfs2_try_evict() argument
847 spin_lock(&gl->gl_lockref.lock); in gfs2_try_evict()
848 ip = gl->gl_object; in gfs2_try_evict()
851 spin_unlock(&gl->gl_lockref.lock); in gfs2_try_evict()
855 gl->gl_no_formal_ino = ip->i_no_formal_ino; in gfs2_try_evict()
861 spin_lock(&gl->gl_lockref.lock); in gfs2_try_evict()
862 ip = gl->gl_object; in gfs2_try_evict()
868 spin_unlock(&gl->gl_lockref.lock); in gfs2_try_evict()
881 struct gfs2_glock *gl = container_of(dwork, struct gfs2_glock, gl_delete); in delete_work_func() local
882 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in delete_work_func()
884 u64 no_addr = gl->gl_name.ln_number; in delete_work_func()
886 spin_lock(&gl->gl_lockref.lock); in delete_work_func()
887 clear_bit(GLF_PENDING_DELETE, &gl->gl_flags); in delete_work_func()
888 spin_unlock(&gl->gl_lockref.lock); in delete_work_func()
890 if (test_bit(GLF_DEMOTE, &gl->gl_flags)) { in delete_work_func()
908 if (gfs2_try_evict(gl)) { in delete_work_func()
909 if (gfs2_queue_delete_work(gl, 5 * HZ)) in delete_work_func()
915 inode = gfs2_lookup_by_inum(sdp, no_addr, gl->gl_no_formal_ino, in delete_work_func()
922 gfs2_glock_put(gl); in delete_work_func()
928 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work); in glock_work_func() local
931 if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) { in glock_work_func()
932 finish_xmote(gl, gl->gl_reply); in glock_work_func()
935 spin_lock(&gl->gl_lockref.lock); in glock_work_func()
936 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && in glock_work_func()
937 gl->gl_state != LM_ST_UNLOCKED && in glock_work_func()
938 gl->gl_demote_state != LM_ST_EXCLUSIVE) { in glock_work_func()
941 holdtime = gl->gl_tchange + gl->gl_hold_time; in glock_work_func()
946 clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags); in glock_work_func()
947 gfs2_set_demote(gl); in glock_work_func()
950 run_queue(gl, 0); in glock_work_func()
954 if (gl->gl_name.ln_type != LM_TYPE_INODE) in glock_work_func()
956 __gfs2_glock_queue_work(gl, delay); in glock_work_func()
964 gl->gl_lockref.count -= drop_refs; in glock_work_func()
965 if (!gl->gl_lockref.count) { in glock_work_func()
966 __gfs2_glock_put(gl); in glock_work_func()
969 spin_unlock(&gl->gl_lockref.lock); in glock_work_func()
977 struct gfs2_glock *gl; in find_insert_glock() local
987 gl = rhashtable_lookup_get_insert_fast(&gl_hash_table, in find_insert_glock()
989 if (IS_ERR(gl)) in find_insert_glock()
992 gl = rhashtable_lookup_fast(&gl_hash_table, in find_insert_glock()
995 if (gl && !lockref_get_not_dead(&gl->gl_lockref)) { in find_insert_glock()
1003 return gl; in find_insert_glock()
1027 struct gfs2_glock *gl, *tmp; in gfs2_glock_get() local
1032 gl = find_insert_glock(&name, NULL); in gfs2_glock_get()
1033 if (gl) { in gfs2_glock_get()
1034 *glp = gl; in gfs2_glock_get()
1044 gl = kmem_cache_alloc(cachep, GFP_NOFS); in gfs2_glock_get()
1045 if (!gl) in gfs2_glock_get()
1048 memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb)); in gfs2_glock_get()
1051 gl->gl_lksb.sb_lvbptr = kzalloc(GDLM_LVB_SIZE, GFP_NOFS); in gfs2_glock_get()
1052 if (!gl->gl_lksb.sb_lvbptr) { in gfs2_glock_get()
1053 kmem_cache_free(cachep, gl); in gfs2_glock_get()
1059 gl->gl_node.next = NULL; in gfs2_glock_get()
1060 gl->gl_flags = 0; in gfs2_glock_get()
1061 gl->gl_name = name; in gfs2_glock_get()
1062 lockdep_set_subclass(&gl->gl_lockref.lock, glops->go_subclass); in gfs2_glock_get()
1063 gl->gl_lockref.count = 1; in gfs2_glock_get()
1064 gl->gl_state = LM_ST_UNLOCKED; in gfs2_glock_get()
1065 gl->gl_target = LM_ST_UNLOCKED; in gfs2_glock_get()
1066 gl->gl_demote_state = LM_ST_EXCLUSIVE; in gfs2_glock_get()
1067 gl->gl_ops = glops; in gfs2_glock_get()
1068 gl->gl_dstamp = 0; in gfs2_glock_get()
1071 gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type]; in gfs2_glock_get()
1073 gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0; in gfs2_glock_get()
1074 gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0; in gfs2_glock_get()
1075 gl->gl_tchange = jiffies; in gfs2_glock_get()
1076 gl->gl_object = NULL; in gfs2_glock_get()
1077 gl->gl_hold_time = GL_GLOCK_DFT_HOLD; in gfs2_glock_get()
1078 INIT_DELAYED_WORK(&gl->gl_work, glock_work_func); in gfs2_glock_get()
1079 if (gl->gl_name.ln_type == LM_TYPE_IOPEN) in gfs2_glock_get()
1080 INIT_DELAYED_WORK(&gl->gl_delete, delete_work_func); in gfs2_glock_get()
1082 mapping = gfs2_glock2aspace(gl); in gfs2_glock_get()
1092 tmp = find_insert_glock(&name, gl); in gfs2_glock_get()
1094 *glp = gl; in gfs2_glock_get()
1104 kfree(gl->gl_lksb.sb_lvbptr); in gfs2_glock_get()
1105 kmem_cache_free(cachep, gl); in gfs2_glock_get()
1122 void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags, in gfs2_holder_init() argument
1126 gh->gh_gl = gl; in gfs2_holder_init()
1133 gfs2_glock_hold(gl); in gfs2_holder_init()
1170 static void gfs2_glock_update_hold_time(struct gfs2_glock *gl, in gfs2_glock_update_hold_time() argument
1176 gl->gl_hold_time = min(gl->gl_hold_time + GL_GLOCK_HOLD_INCR, in gfs2_glock_update_hold_time()
1294 static void handle_callback(struct gfs2_glock *gl, unsigned int state, in handle_callback() argument
1298 set_bit(GLF_PENDING_DEMOTE, &gl->gl_flags); in handle_callback()
1300 gfs2_set_demote(gl); in handle_callback()
1301 if (gl->gl_demote_state == LM_ST_EXCLUSIVE) { in handle_callback()
1302 gl->gl_demote_state = state; in handle_callback()
1303 gl->gl_demote_time = jiffies; in handle_callback()
1304 } else if (gl->gl_demote_state != LM_ST_UNLOCKED && in handle_callback()
1305 gl->gl_demote_state != state) { in handle_callback()
1306 gl->gl_demote_state = LM_ST_UNLOCKED; in handle_callback()
1308 if (gl->gl_ops->go_callback) in handle_callback()
1309 gl->gl_ops->go_callback(gl, remote); in handle_callback()
1310 trace_gfs2_demote_rq(gl, remote); in handle_callback()
1343 __releases(&gl->gl_lockref.lock) in add_to_queue()
1344 __acquires(&gl->gl_lockref.lock) in add_to_queue()
1346 struct gfs2_glock *gl = gh->gh_gl; in add_to_queue() local
1347 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in add_to_queue()
1352 GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL); in add_to_queue()
1354 GLOCK_BUG_ON(gl, true); in add_to_queue()
1357 if (test_bit(GLF_LOCK, &gl->gl_flags)) in add_to_queue()
1358 try_futile = !may_grant(gl, gh); in add_to_queue()
1359 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) in add_to_queue()
1363 list_for_each_entry(gh2, &gl->gl_holders, gh_list) { in add_to_queue()
1380 gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT); in add_to_queue()
1381 gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT); in add_to_queue()
1383 list_add_tail(&gh->gh_list, &gl->gl_holders); in add_to_queue()
1390 gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list); in add_to_queue()
1392 spin_unlock(&gl->gl_lockref.lock); in add_to_queue()
1394 sdp->sd_lockstruct.ls_ops->lm_cancel(gl); in add_to_queue()
1395 spin_lock(&gl->gl_lockref.lock); in add_to_queue()
1408 gfs2_dump_glock(NULL, gl, true); in add_to_queue()
1423 struct gfs2_glock *gl = gh->gh_gl; in gfs2_glock_nq() local
1426 if (glock_blocked_by_withdraw(gl) && !(gh->gh_flags & LM_FLAG_NOEXP)) in gfs2_glock_nq()
1429 if (test_bit(GLF_LRU, &gl->gl_flags)) in gfs2_glock_nq()
1430 gfs2_glock_remove_from_lru(gl); in gfs2_glock_nq()
1432 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_nq()
1435 test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) { in gfs2_glock_nq()
1436 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); in gfs2_glock_nq()
1437 gl->gl_lockref.count++; in gfs2_glock_nq()
1438 __gfs2_glock_queue_work(gl, 0); in gfs2_glock_nq()
1440 run_queue(gl, 1); in gfs2_glock_nq()
1441 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_nq()
1469 struct gfs2_glock *gl = gh->gh_gl; in gfs2_glock_dq() local
1470 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_glock_dq()
1474 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_dq()
1483 glock_blocked_by_withdraw(gl) && in gfs2_glock_dq()
1486 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_dq()
1490 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_dq()
1493 handle_callback(gl, LM_ST_UNLOCKED, 0, false); in gfs2_glock_dq()
1497 if (list_empty(&gl->gl_holders) && in gfs2_glock_dq()
1498 !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && in gfs2_glock_dq()
1499 !test_bit(GLF_DEMOTE, &gl->gl_flags)) in gfs2_glock_dq()
1502 if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl)) in gfs2_glock_dq()
1503 gfs2_glock_add_to_lru(gl); in gfs2_glock_dq()
1507 gl->gl_lockref.count++; in gfs2_glock_dq()
1508 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && in gfs2_glock_dq()
1509 !test_bit(GLF_DEMOTE, &gl->gl_flags) && in gfs2_glock_dq()
1510 gl->gl_name.ln_type == LM_TYPE_INODE) in gfs2_glock_dq()
1511 delay = gl->gl_hold_time; in gfs2_glock_dq()
1512 __gfs2_glock_queue_work(gl, delay); in gfs2_glock_dq()
1514 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_dq()
1519 struct gfs2_glock *gl = gh->gh_gl; in gfs2_glock_dq_wait() local
1522 wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE); in gfs2_glock_dq_wait()
1553 struct gfs2_glock *gl; in gfs2_glock_nq_num() local
1556 error = gfs2_glock_get(sdp, number, glops, CREATE, &gl); in gfs2_glock_nq_num()
1558 error = gfs2_glock_nq_init(gl, state, flags, gh); in gfs2_glock_nq_num()
1559 gfs2_glock_put(gl); in gfs2_glock_nq_num()
1674 void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state) in gfs2_glock_cb() argument
1680 gfs2_glock_hold(gl); in gfs2_glock_cb()
1681 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_cb()
1682 holdtime = gl->gl_tchange + gl->gl_hold_time; in gfs2_glock_cb()
1683 if (!list_empty(&gl->gl_holders) && in gfs2_glock_cb()
1684 gl->gl_name.ln_type == LM_TYPE_INODE) { in gfs2_glock_cb()
1687 if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags)) in gfs2_glock_cb()
1688 delay = gl->gl_hold_time; in gfs2_glock_cb()
1690 handle_callback(gl, state, delay, true); in gfs2_glock_cb()
1691 __gfs2_glock_queue_work(gl, delay); in gfs2_glock_cb()
1692 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_cb()
1706 static int gfs2_should_freeze(const struct gfs2_glock *gl) in gfs2_should_freeze() argument
1710 if (gl->gl_reply & ~LM_OUT_ST_MASK) in gfs2_should_freeze()
1712 if (gl->gl_target == LM_ST_UNLOCKED) in gfs2_should_freeze()
1715 list_for_each_entry(gh, &gl->gl_holders, gh_list) { in gfs2_should_freeze()
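
gfs2_should_freeze() decides whether a DLM reply received while recovery blocks locks should leave the glock frozen: error replies freeze, pending unlocks and NOEXP holders do not. The return values and the flag test are assumptions:

static int gfs2_should_freeze(const struct gfs2_glock *gl)
{
        const struct gfs2_holder *gh;

        if (gl->gl_reply & ~LM_OUT_ST_MASK)
                return 1;       /* assumed: error reply, freeze */
        if (gl->gl_target == LM_ST_UNLOCKED)
                return 0;       /* assumed */

        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                if (LM_FLAG_NOEXP & gh->gh_flags)       /* assumed */
                        return 0;
        }
        return 1;
}
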
1734 void gfs2_glock_complete(struct gfs2_glock *gl, int ret) in gfs2_glock_complete() argument
1736 struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; in gfs2_glock_complete()
1738 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_complete()
1739 gl->gl_reply = ret; in gfs2_glock_complete()
1742 if (gfs2_should_freeze(gl)) { in gfs2_glock_complete()
1743 set_bit(GLF_FROZEN, &gl->gl_flags); in gfs2_glock_complete()
1744 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_complete()
1749 gl->gl_lockref.count++; in gfs2_glock_complete()
1750 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); in gfs2_glock_complete()
1751 __gfs2_glock_queue_work(gl, 0); in gfs2_glock_complete()
1752 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_complete()
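
gfs2_glock_complete() is the lock-manager completion callback: it records the reply, freezes the glock if recovery is blocking locks, and otherwise takes a reference and kicks the state machine. The DFL_BLOCK_LOCKS test (which references only `ls`) and the early return are assumptions:

void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
        struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;

        spin_lock(&gl->gl_lockref.lock);
        gl->gl_reply = ret;

        if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {  /* assumed */
                if (gfs2_should_freeze(gl)) {
                        set_bit(GLF_FROZEN, &gl->gl_flags);
                        spin_unlock(&gl->gl_lockref.lock);
                        return;                         /* assumed */
                }
        }

        gl->gl_lockref.count++;
        set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
        __gfs2_glock_queue_work(gl, 0);
        spin_unlock(&gl->gl_lockref.lock);
}
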
1789 struct gfs2_glock *gl; in gfs2_dispose_glock_lru() local
1794 gl = list_first_entry(list, struct gfs2_glock, gl_lru); in gfs2_dispose_glock_lru()
1795 list_del_init(&gl->gl_lru); in gfs2_dispose_glock_lru()
1796 clear_bit(GLF_LRU, &gl->gl_flags); in gfs2_dispose_glock_lru()
1797 if (!spin_trylock(&gl->gl_lockref.lock)) { in gfs2_dispose_glock_lru()
1799 list_add(&gl->gl_lru, &lru_list); in gfs2_dispose_glock_lru()
1800 set_bit(GLF_LRU, &gl->gl_flags); in gfs2_dispose_glock_lru()
1804 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { in gfs2_dispose_glock_lru()
1805 spin_unlock(&gl->gl_lockref.lock); in gfs2_dispose_glock_lru()
1808 gl->gl_lockref.count++; in gfs2_dispose_glock_lru()
1809 if (demote_ok(gl)) in gfs2_dispose_glock_lru()
1810 handle_callback(gl, LM_ST_UNLOCKED, 0, false); in gfs2_dispose_glock_lru()
1811 WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags)); in gfs2_dispose_glock_lru()
1812 __gfs2_glock_queue_work(gl, 0); in gfs2_dispose_glock_lru()
1813 spin_unlock(&gl->gl_lockref.lock); in gfs2_dispose_glock_lru()
1829 struct gfs2_glock *gl; in gfs2_scan_glock_lru() local
1836 gl = list_first_entry(&lru_list, struct gfs2_glock, gl_lru); in gfs2_scan_glock_lru()
1839 if (!test_bit(GLF_LOCK, &gl->gl_flags)) { in gfs2_scan_glock_lru()
1840 list_move(&gl->gl_lru, &dispose); in gfs2_scan_glock_lru()
1846 list_move(&gl->gl_lru, &skipped); in gfs2_scan_glock_lru()
1888 struct gfs2_glock *gl; in glock_hash_walk() local
1896 while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl)) in glock_hash_walk()
1897 if (gl->gl_name.ln_sbd == sdp && in glock_hash_walk()
1898 lockref_get_not_dead(&gl->gl_lockref)) in glock_hash_walk()
1899 examiner(gl); in glock_hash_walk()
1902 } while (cond_resched(), gl == ERR_PTR(-EAGAIN)); in glock_hash_walk()
1907 bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay) in gfs2_queue_delete_work() argument
1911 spin_lock(&gl->gl_lockref.lock); in gfs2_queue_delete_work()
1913 &gl->gl_delete, delay); in gfs2_queue_delete_work()
1915 set_bit(GLF_PENDING_DELETE, &gl->gl_flags); in gfs2_queue_delete_work()
1916 spin_unlock(&gl->gl_lockref.lock); in gfs2_queue_delete_work()
1920 void gfs2_cancel_delete_work(struct gfs2_glock *gl) in gfs2_cancel_delete_work() argument
1922 if (cancel_delayed_work_sync(&gl->gl_delete)) { in gfs2_cancel_delete_work()
1923 clear_bit(GLF_PENDING_DELETE, &gl->gl_flags); in gfs2_cancel_delete_work()
1924 gfs2_glock_put(gl); in gfs2_cancel_delete_work()
1928 bool gfs2_delete_work_queued(const struct gfs2_glock *gl) in gfs2_delete_work_queued() argument
1930 return test_bit(GLF_PENDING_DELETE, &gl->gl_flags); in gfs2_delete_work_queued()
1933 static void flush_delete_work(struct gfs2_glock *gl) in flush_delete_work() argument
1935 if (gl->gl_name.ln_type == LM_TYPE_IOPEN) { in flush_delete_work()
1936 if (cancel_delayed_work(&gl->gl_delete)) { in flush_delete_work()
1938 &gl->gl_delete, 0); in flush_delete_work()
1941 gfs2_glock_queue_work(gl, 0); in flush_delete_work()
1956 static void thaw_glock(struct gfs2_glock *gl) in thaw_glock() argument
1958 if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) { in thaw_glock()
1959 gfs2_glock_put(gl); in thaw_glock()
1962 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); in thaw_glock()
1963 gfs2_glock_queue_work(gl, 0); in thaw_glock()
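
thaw_glock(), called from glock_hash_walk() with an extra reference held, reconstructs to: if the glock was not frozen, just drop that reference; otherwise mark a reply pending and queue the work that will process it. Only the early return is inferred:

static void thaw_glock(struct gfs2_glock *gl)
{
        if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) {
                gfs2_glock_put(gl);     /* drop the walker's reference */
                return;                 /* assumed */
        }
        set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
        gfs2_glock_queue_work(gl, 0);
}
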
1972 static void clear_glock(struct gfs2_glock *gl) in clear_glock() argument
1974 gfs2_glock_remove_from_lru(gl); in clear_glock()
1976 spin_lock(&gl->gl_lockref.lock); in clear_glock()
1977 if (gl->gl_state != LM_ST_UNLOCKED) in clear_glock()
1978 handle_callback(gl, LM_ST_UNLOCKED, 0, false); in clear_glock()
1979 __gfs2_glock_queue_work(gl, 0); in clear_glock()
1980 spin_unlock(&gl->gl_lockref.lock); in clear_glock()
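
Every line of clear_glock() that touches the glock appears above, so the function reconstructs completely: pull the glock off the LRU, request demotion to unlocked if needed, and queue the work item, all under gl_lockref.lock:

static void clear_glock(struct gfs2_glock *gl)
{
        gfs2_glock_remove_from_lru(gl);

        spin_lock(&gl->gl_lockref.lock);
        if (gl->gl_state != LM_ST_UNLOCKED)
                handle_callback(gl, LM_ST_UNLOCKED, 0, false);
        __gfs2_glock_queue_work(gl, 0);
        spin_unlock(&gl->gl_lockref.lock);
}
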
1994 static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid) in dump_glock() argument
1996 spin_lock(&gl->gl_lockref.lock); in dump_glock()
1997 gfs2_dump_glock(seq, gl, fsid); in dump_glock()
1998 spin_unlock(&gl->gl_lockref.lock); in dump_glock()
2001 static void dump_glock_func(struct gfs2_glock *gl) in dump_glock_func() argument
2003 dump_glock(NULL, gl, true); in dump_glock_func()
2027 struct gfs2_glock *gl = ip->i_gl; in gfs2_glock_finish_truncate() local
2031 gfs2_glock_assert_withdraw(gl, ret == 0); in gfs2_glock_finish_truncate()
2033 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_finish_truncate()
2034 clear_bit(GLF_LOCK, &gl->gl_flags); in gfs2_glock_finish_truncate()
2035 run_queue(gl, 1); in gfs2_glock_finish_truncate()
2036 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_finish_truncate()
2110 static const char *gflags2str(char *buf, const struct gfs2_glock *gl) in gflags2str() argument
2112 const unsigned long *gflags = &gl->gl_flags; in gflags2str()
2135 if (!list_empty(&gl->gl_holders)) in gflags2str()
2139 if (gl->gl_object) in gflags2str()
2169 void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid) in gfs2_dump_glock() argument
2171 const struct gfs2_glock_operations *glops = gl->gl_ops; in gfs2_dump_glock()
2175 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_dump_glock()
2179 if (gl->gl_ops->go_flags & GLOF_ASPACE) { in gfs2_dump_glock()
2180 struct address_space *mapping = gfs2_glock2aspace(gl); in gfs2_dump_glock()
2187 dtime = jiffies - gl->gl_demote_time; in gfs2_dump_glock()
2189 if (!test_bit(GLF_DEMOTE, &gl->gl_flags)) in gfs2_dump_glock()
2193 fs_id_buf, state2str(gl->gl_state), in gfs2_dump_glock()
2194 gl->gl_name.ln_type, in gfs2_dump_glock()
2195 (unsigned long long)gl->gl_name.ln_number, in gfs2_dump_glock()
2196 gflags2str(gflags_buf, gl), in gfs2_dump_glock()
2197 state2str(gl->gl_target), in gfs2_dump_glock()
2198 state2str(gl->gl_demote_state), dtime, in gfs2_dump_glock()
2199 atomic_read(&gl->gl_ail_count), in gfs2_dump_glock()
2200 atomic_read(&gl->gl_revokes), in gfs2_dump_glock()
2201 (int)gl->gl_lockref.count, gl->gl_hold_time, nrpages); in gfs2_dump_glock()
2203 list_for_each_entry(gh, &gl->gl_holders, gh_list) in gfs2_dump_glock()
2206 if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump) in gfs2_dump_glock()
2207 glops->go_dump(seq, gl, fs_id_buf); in gfs2_dump_glock()
2212 struct gfs2_glock *gl = iter_ptr; in gfs2_glstats_seq_show() local
2215 gl->gl_name.ln_type, in gfs2_glstats_seq_show()
2216 (unsigned long long)gl->gl_name.ln_number, in gfs2_glstats_seq_show()
2217 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT], in gfs2_glstats_seq_show()
2218 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR], in gfs2_glstats_seq_show()
2219 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB], in gfs2_glstats_seq_show()
2220 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB], in gfs2_glstats_seq_show()
2221 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT], in gfs2_glstats_seq_show()
2222 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR], in gfs2_glstats_seq_show()
2223 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT], in gfs2_glstats_seq_show()
2224 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]); in gfs2_glstats_seq_show()
2329 struct gfs2_glock *gl = gi->gl; in gfs2_glock_iter_next() local
2331 if (gl) { in gfs2_glock_iter_next()
2334 if (!lockref_put_not_zero(&gl->gl_lockref)) in gfs2_glock_iter_next()
2335 gfs2_glock_queue_put(gl); in gfs2_glock_iter_next()
2338 gl = rhashtable_walk_next(&gi->hti); in gfs2_glock_iter_next()
2339 if (IS_ERR_OR_NULL(gl)) { in gfs2_glock_iter_next()
2340 if (gl == ERR_PTR(-EAGAIN)) { in gfs2_glock_iter_next()
2344 gl = NULL; in gfs2_glock_iter_next()
2347 if (gl->gl_name.ln_sbd != gi->sdp) in gfs2_glock_iter_next()
2350 if (!lockref_get_not_dead(&gl->gl_lockref)) in gfs2_glock_iter_next()
2354 if (__lockref_is_dead(&gl->gl_lockref)) in gfs2_glock_iter_next()
2359 gi->gl = gl; in gfs2_glock_iter_next()
2384 return gi->gl; in gfs2_glock_seq_start()
2395 return gi->gl; in gfs2_glock_seq_next()
2474 gi->gl = NULL; in __gfs2_glocks_open()
2490 if (gi->gl) in gfs2_glocks_release()
2491 gfs2_glock_put(gi->gl); in gfs2_glocks_release()