Lines Matching +full:acquisition +full:- +full:time +full:- +full:ns

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
4 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
100 struct lm_lockname *wait_name = wait_glock->name; in glock_wake_function()
103 if (wake_name->ln_sbd != wait_name->ln_sbd || in glock_wake_function()
104 wake_name->ln_number != wait_name->ln_number || in glock_wake_function()
105 wake_name->ln_type != wait_name->ln_type) in glock_wake_function()
118 * wake_up_glock - Wake up waiters on a glock
123 wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name); in wake_up_glock()
126 __wake_up(wq, TASK_NORMAL, 1, &gl->gl_name); in wake_up_glock()
133 kfree(gl->gl_lksb.sb_lvbptr); in gfs2_glock_dealloc()
134 if (gl->gl_ops->go_flags & GLOF_ASPACE) { in gfs2_glock_dealloc()
143 * glock_blocked_by_withdraw - determine if we can still use a glock
157 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in glock_blocked_by_withdraw()
161 if (gl->gl_ops->go_flags & GLOF_NONDISK) in glock_blocked_by_withdraw()
163 if (!sdp->sd_jdesc || in glock_blocked_by_withdraw()
164 gl->gl_name.ln_number == sdp->sd_jdesc->jd_no_addr) in glock_blocked_by_withdraw()
171 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_glock_free()
173 gfs2_glock_assert_withdraw(gl, atomic_read(&gl->gl_revokes) == 0); in gfs2_glock_free()
174 rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms); in gfs2_glock_free()
177 call_rcu(&gl->gl_rcu, gfs2_glock_dealloc); in gfs2_glock_free()
178 if (atomic_dec_and_test(&sdp->sd_glock_disposal)) in gfs2_glock_free()
179 wake_up(&sdp->sd_kill_wait); in gfs2_glock_free()
183 * gfs2_glock_hold() - increment reference count on glock
190 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref)); in gfs2_glock_hold()
191 lockref_get(&gl->gl_lockref); in gfs2_glock_hold()
196 * demote_ok - Check to see if it's ok to unlock a glock
204 const struct gfs2_glock_operations *glops = gl->gl_ops; in demote_ok()
206 if (gl->gl_state == LM_ST_UNLOCKED) in demote_ok()
208 if (!list_empty(&gl->gl_holders)) in demote_ok()
210 if (glops->go_demote_ok) in demote_ok()
211 return glops->go_demote_ok(gl); in demote_ok()
218 if (!(gl->gl_ops->go_flags & GLOF_LRU)) in gfs2_glock_add_to_lru()
223 list_move_tail(&gl->gl_lru, &lru_list); in gfs2_glock_add_to_lru()
225 if (!test_bit(GLF_LRU, &gl->gl_flags)) { in gfs2_glock_add_to_lru()
226 set_bit(GLF_LRU, &gl->gl_flags); in gfs2_glock_add_to_lru()
235 if (!(gl->gl_ops->go_flags & GLOF_LRU)) in gfs2_glock_remove_from_lru()
239 if (test_bit(GLF_LRU, &gl->gl_flags)) { in gfs2_glock_remove_from_lru()
240 list_del_init(&gl->gl_lru); in gfs2_glock_remove_from_lru()
242 clear_bit(GLF_LRU, &gl->gl_flags); in gfs2_glock_remove_from_lru()
252 if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) { in __gfs2_glock_queue_work()
259 GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2); in __gfs2_glock_queue_work()
260 gl->gl_lockref.count--; in __gfs2_glock_queue_work()
265 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_queue_work()
267 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_queue_work()
272 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in __gfs2_glock_put()
275 lockref_mark_dead(&gl->gl_lockref); in __gfs2_glock_put()
276 spin_unlock(&gl->gl_lockref.lock); in __gfs2_glock_put()
278 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders)); in __gfs2_glock_put()
285 sdp->sd_lockstruct.ls_ops->lm_put_lock(gl); in __gfs2_glock_put()
297 * gfs2_glock_put() - Decrement reference count on glock
304 if (lockref_put_or_lock(&gl->gl_lockref)) in gfs2_glock_put()
311 * may_grant - check if it's ok to grant a new lock
329 GLOCK_BUG_ON(gl, !test_bit(HIF_HOLDER, &current_gh->gh_iflags)); in may_grant()
331 switch(current_gh->gh_state) { in may_grant()
340 return gh->gh_state == LM_ST_EXCLUSIVE && in may_grant()
341 (current_gh->gh_flags & LM_FLAG_NODE_SCOPE) && in may_grant()
342 (gh->gh_flags & LM_FLAG_NODE_SCOPE); in may_grant()
346 return gh->gh_state == current_gh->gh_state; in may_grant()
353 if (gl->gl_state == gh->gh_state) in may_grant()
355 if (gh->gh_flags & GL_EXACT) in may_grant()
357 if (gl->gl_state == LM_ST_EXCLUSIVE) { in may_grant()
358 return gh->gh_state == LM_ST_SHARED || in may_grant()
359 gh->gh_state == LM_ST_DEFERRED; in may_grant()
361 if (gh->gh_flags & LM_FLAG_ANY) in may_grant()
362 return gl->gl_state != LM_ST_UNLOCKED; in may_grant()
368 clear_bit(HIF_WAIT, &gh->gh_iflags); in gfs2_holder_wake()
370 wake_up_bit(&gh->gh_iflags, HIF_WAIT); in gfs2_holder_wake()
371 if (gh->gh_flags & GL_ASYNC) { in gfs2_holder_wake()
372 struct gfs2_sbd *sdp = gh->gh_gl->gl_name.ln_sbd; in gfs2_holder_wake()
374 wake_up(&sdp->sd_async_glock_wait); in gfs2_holder_wake()
379 * do_error - Something unexpected has happened during a lock request
388 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { in do_error()
389 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) in do_error()
392 gh->gh_error = -EIO; in do_error()
393 else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) in do_error()
394 gh->gh_error = GLR_TRYFAILED; in do_error()
397 list_del_init(&gh->gh_list); in do_error()
404 * find_first_holder - find the first "holder" gh
412 if (!list_empty(&gl->gl_holders)) { in find_first_holder()
413 gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, in find_first_holder()
415 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) in find_first_holder()
422 * gfs2_instantiate - Call the glops instantiate function
429 struct gfs2_glock *gl = gh->gh_gl; in gfs2_instantiate()
430 const struct gfs2_glock_operations *glops = gl->gl_ops; in gfs2_instantiate()
434 if (!test_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags)) in gfs2_instantiate()
441 if (test_and_set_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags)) { in gfs2_instantiate()
442 wait_on_bit(&gl->gl_flags, GLF_INSTANTIATE_IN_PROG, in gfs2_instantiate()
454 ret = glops->go_instantiate(gl); in gfs2_instantiate()
456 clear_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags); in gfs2_instantiate()
457 clear_and_wake_up_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags); in gfs2_instantiate()
462 if (glops->go_held) in gfs2_instantiate()
463 return glops->go_held(gh); in gfs2_instantiate()
468 * do_promote - promote as many requests as possible on the current queue
479 list_for_each_entry(gh, &gl->gl_holders, gh_list) { in do_promote()
480 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) in do_promote()
489 if (list_is_first(&gh->gh_list, &gl->gl_holders)) in do_promote()
494 set_bit(HIF_HOLDER, &gh->gh_iflags); in do_promote()
504 * find_first_waiter - find the first gh that's waiting for the glock
512 list_for_each_entry(gh, &gl->gl_holders, gh_list) { in find_first_waiter()
513 if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) in find_first_waiter()
520 * state_change - record that the glock is now in a different state
529 held1 = (gl->gl_state != LM_ST_UNLOCKED); in state_change()
533 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref)); in state_change()
535 gl->gl_lockref.count++; in state_change()
537 gl->gl_lockref.count--; in state_change()
539 if (new_state != gl->gl_target) in state_change()
540 /* shorten our minimum hold time */ in state_change()
541 gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR, in state_change()
543 gl->gl_state = new_state; in state_change()
544 gl->gl_tchange = jiffies; in state_change()
549 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_set_demote()
551 set_bit(GLF_DEMOTE, &gl->gl_flags); in gfs2_set_demote()
553 wake_up(&sdp->sd_async_glock_wait); in gfs2_set_demote()
558 gl->gl_demote_state = LM_ST_EXCLUSIVE; in gfs2_demote_wake()
559 clear_bit(GLF_DEMOTE, &gl->gl_flags); in gfs2_demote_wake()
561 wake_up_bit(&gl->gl_flags, GLF_DEMOTE); in gfs2_demote_wake()
565 * finish_xmote - The DLM has replied to one of our lock requests
573 const struct gfs2_glock_operations *glops = gl->gl_ops; in finish_xmote()
577 spin_lock(&gl->gl_lockref.lock); in finish_xmote()
583 if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) && in finish_xmote()
584 state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED) in finish_xmote()
585 gl->gl_target = LM_ST_UNLOCKED; in finish_xmote()
588 if (unlikely(state != gl->gl_target)) { in finish_xmote()
591 if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) { in finish_xmote()
594 list_move_tail(&gh->gh_list, &gl->gl_holders); in finish_xmote()
596 gl->gl_target = gh->gh_state; in finish_xmote()
601 /* Some error or failed "try lock" - report it */ in finish_xmote()
603 (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) { in finish_xmote()
604 gl->gl_target = gl->gl_state; in finish_xmote()
613 do_xmote(gl, gh, gl->gl_target); in finish_xmote()
621 fs_err(gl->gl_name.ln_sbd, "wanted %u got %u\n", in finish_xmote()
622 gl->gl_target, state); in finish_xmote()
625 spin_unlock(&gl->gl_lockref.lock); in finish_xmote()
629 /* Fast path - we got what we asked for */ in finish_xmote()
630 if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) in finish_xmote()
633 if (glops->go_xmote_bh) { in finish_xmote()
636 spin_unlock(&gl->gl_lockref.lock); in finish_xmote()
637 rv = glops->go_xmote_bh(gl); in finish_xmote()
638 spin_lock(&gl->gl_lockref.lock); in finish_xmote()
647 clear_bit(GLF_LOCK, &gl->gl_flags); in finish_xmote()
648 spin_unlock(&gl->gl_lockref.lock); in finish_xmote()
653 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in is_system_glock()
654 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); in is_system_glock()
656 if (gl == m_ip->i_gl) in is_system_glock()
662 * do_xmote - Calls the DLM to change the state of a lock
671 __releases(&gl->gl_lockref.lock) in do_xmote()
672 __acquires(&gl->gl_lockref.lock) in do_xmote()
674 const struct gfs2_glock_operations *glops = gl->gl_ops; in do_xmote()
675 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in do_xmote()
676 unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0); in do_xmote()
680 gh && !(gh->gh_flags & LM_FLAG_NOEXP)) in do_xmote()
684 GLOCK_BUG_ON(gl, gl->gl_state == target); in do_xmote()
685 GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target); in do_xmote()
687 glops->go_inval) { in do_xmote()
694 &gl->gl_flags)) in do_xmote()
698 gl->gl_req = target; in do_xmote()
699 set_bit(GLF_BLOCKING, &gl->gl_flags); in do_xmote()
700 if ((gl->gl_req == LM_ST_UNLOCKED) || in do_xmote()
701 (gl->gl_state == LM_ST_EXCLUSIVE) || in do_xmote()
703 clear_bit(GLF_BLOCKING, &gl->gl_flags); in do_xmote()
704 spin_unlock(&gl->gl_lockref.lock); in do_xmote()
705 if (glops->go_sync) { in do_xmote()
706 ret = glops->go_sync(gl); in do_xmote()
712 if (cmpxchg(&sdp->sd_log_error, 0, ret)) { in do_xmote()
719 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) { in do_xmote()
727 if ((atomic_read(&gl->gl_ail_count) != 0) && in do_xmote()
728 (!cmpxchg(&sdp->sd_log_error, 0, -EIO))) { in do_xmote()
730 !atomic_read(&gl->gl_ail_count)); in do_xmote()
733 glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA); in do_xmote()
734 clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags); in do_xmote()
743 * change the mount to read-only. Most importantly, we must not call in do_xmote()
760 if (unlikely(sdp->sd_log_error && !gfs2_withdrawn(sdp))) in do_xmote()
764 test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags))) { in do_xmote()
778 clear_bit(GLF_LOCK, &gl->gl_flags); in do_xmote()
779 clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); in do_xmote()
783 clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags); in do_xmote()
787 if (sdp->sd_lockstruct.ls_ops->lm_lock) { in do_xmote()
789 ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags); in do_xmote()
790 if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED && in do_xmote()
792 test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) { in do_xmote()
804 spin_lock(&gl->gl_lockref.lock); in do_xmote()
808 * run_queue - do all outstanding tasks related to a glock
815 __releases(&gl->gl_lockref.lock) in run_queue()
816 __acquires(&gl->gl_lockref.lock) in run_queue()
820 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) in run_queue()
823 GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)); in run_queue()
825 if (test_bit(GLF_DEMOTE, &gl->gl_flags) && in run_queue()
826 gl->gl_demote_state != gl->gl_state) { in run_queue()
831 set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); in run_queue()
832 GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE); in run_queue()
833 gl->gl_target = gl->gl_demote_state; in run_queue()
835 if (test_bit(GLF_DEMOTE, &gl->gl_flags)) in run_queue()
840 gl->gl_target = gh->gh_state; in run_queue()
841 if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) in run_queue()
844 do_xmote(gl, gh, gl->gl_target); in run_queue()
848 clear_bit(GLF_LOCK, &gl->gl_flags); in run_queue()
850 gl->gl_lockref.count++; in run_queue()
855 clear_bit(GLF_LOCK, &gl->gl_flags); in run_queue()
861 * glock_set_object - set the gl_object field of a glock
869 spin_lock(&gl->gl_lockref.lock); in glock_set_object()
870 prev_object = gl->gl_object; in glock_set_object()
871 gl->gl_object = object; in glock_set_object()
872 spin_unlock(&gl->gl_lockref.lock); in glock_set_object()
873 if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == NULL)) { in glock_set_object()
875 gl->gl_name.ln_type, in glock_set_object()
876 (unsigned long long)gl->gl_name.ln_number); in glock_set_object()
882 * glock_clear_object - clear the gl_object field of a glock
890 spin_lock(&gl->gl_lockref.lock); in glock_clear_object()
891 prev_object = gl->gl_object; in glock_clear_object()
892 gl->gl_object = NULL; in glock_clear_object()
893 spin_unlock(&gl->gl_lockref.lock); in glock_clear_object()
894 if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == object)) { in glock_clear_object()
896 gl->gl_name.ln_type, in glock_clear_object()
897 (unsigned long long)gl->gl_name.ln_number); in glock_clear_object()
904 struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr; in gfs2_inode_remember_delete()
906 if (ri->ri_magic == 0) in gfs2_inode_remember_delete()
907 ri->ri_magic = cpu_to_be32(GFS2_MAGIC); in gfs2_inode_remember_delete()
908 if (ri->ri_magic == cpu_to_be32(GFS2_MAGIC)) in gfs2_inode_remember_delete()
909 ri->ri_generation_deleted = cpu_to_be64(generation); in gfs2_inode_remember_delete()
914 struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr; in gfs2_inode_already_deleted()
916 if (ri->ri_magic != cpu_to_be32(GFS2_MAGIC)) in gfs2_inode_already_deleted()
918 return generation <= be64_to_cpu(ri->ri_generation_deleted); in gfs2_inode_already_deleted()
949 spin_lock(&gl->gl_lockref.lock); in gfs2_try_evict()
950 ip = gl->gl_object; in gfs2_try_evict()
951 if (ip && !igrab(&ip->i_inode)) in gfs2_try_evict()
953 spin_unlock(&gl->gl_lockref.lock); in gfs2_try_evict()
955 gl->gl_no_formal_ino = ip->i_no_formal_ino; in gfs2_try_evict()
956 set_bit(GIF_DEFERRED_DELETE, &ip->i_flags); in gfs2_try_evict()
957 d_prune_aliases(&ip->i_inode); in gfs2_try_evict()
958 iput(&ip->i_inode); in gfs2_try_evict()
960 /* If the inode was evicted, gl->gl_object will now be NULL. */ in gfs2_try_evict()
961 spin_lock(&gl->gl_lockref.lock); in gfs2_try_evict()
962 ip = gl->gl_object; in gfs2_try_evict()
964 clear_bit(GIF_DEFERRED_DELETE, &ip->i_flags); in gfs2_try_evict()
965 if (!igrab(&ip->i_inode)) in gfs2_try_evict()
968 spin_unlock(&gl->gl_lockref.lock); in gfs2_try_evict()
970 gfs2_glock_poke(ip->i_gl); in gfs2_try_evict()
971 iput(&ip->i_inode); in gfs2_try_evict()
980 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_queue_try_to_evict()
982 if (test_and_set_bit(GLF_TRY_TO_EVICT, &gl->gl_flags)) in gfs2_queue_try_to_evict()
984 return queue_delayed_work(sdp->sd_delete_wq, in gfs2_queue_try_to_evict()
985 &gl->gl_delete, 0); in gfs2_queue_try_to_evict()
990 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_queue_verify_evict()
992 if (test_and_set_bit(GLF_VERIFY_EVICT, &gl->gl_flags)) in gfs2_queue_verify_evict()
994 return queue_delayed_work(sdp->sd_delete_wq, in gfs2_queue_verify_evict()
995 &gl->gl_delete, 5 * HZ); in gfs2_queue_verify_evict()
1002 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in delete_work_func()
1004 u64 no_addr = gl->gl_name.ln_number; in delete_work_func()
1006 if (test_and_clear_bit(GLF_TRY_TO_EVICT, &gl->gl_flags)) { in delete_work_func()
1009 * delete the inode some time before verifying that the delete in delete_work_func()
1020 * rework won't cooperate. At a later time, when we no longer in delete_work_func()
1025 if (test_bit(SDF_KILL, &sdp->sd_flags)) in delete_work_func()
1033 if (test_and_clear_bit(GLF_VERIFY_EVICT, &gl->gl_flags)) { in delete_work_func()
1034 inode = gfs2_lookup_by_inum(sdp, no_addr, gl->gl_no_formal_ino, in delete_work_func()
1037 if (PTR_ERR(inode) == -EAGAIN && in delete_work_func()
1038 !test_bit(SDF_KILL, &sdp->sd_flags) && in delete_work_func()
1057 if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) { in glock_work_func()
1058 finish_xmote(gl, gl->gl_reply); in glock_work_func()
1061 spin_lock(&gl->gl_lockref.lock); in glock_work_func()
1062 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && in glock_work_func()
1063 gl->gl_state != LM_ST_UNLOCKED && in glock_work_func()
1064 gl->gl_demote_state != LM_ST_EXCLUSIVE) { in glock_work_func()
1067 holdtime = gl->gl_tchange + gl->gl_hold_time; in glock_work_func()
1069 delay = holdtime - now; in glock_work_func()
1072 clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags); in glock_work_func()
1079 drop_refs--; in glock_work_func()
1080 if (gl->gl_name.ln_type != LM_TYPE_INODE) in glock_work_func()
1090 gl->gl_lockref.count -= drop_refs; in glock_work_func()
1091 if (!gl->gl_lockref.count) { in glock_work_func()
1095 spin_unlock(&gl->gl_lockref.lock); in glock_work_func()
1114 &new->gl_node, ht_parms); in find_insert_glock()
1121 if (gl && !lockref_get_not_dead(&gl->gl_lockref)) { in find_insert_glock()
1133 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
1149 struct super_block *s = sdp->sd_vfs; in gfs2_glock_get()
1151 .ln_type = glops->go_type, in gfs2_glock_get()
1163 return -ENOENT; in gfs2_glock_get()
1165 if (glops->go_flags & GLOF_ASPACE) { in gfs2_glock_get()
1169 return -ENOMEM; in gfs2_glock_get()
1170 gl = &gla->glock; in gfs2_glock_get()
1174 return -ENOMEM; in gfs2_glock_get()
1176 memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb)); in gfs2_glock_get()
1177 gl->gl_ops = glops; in gfs2_glock_get()
1179 if (glops->go_flags & GLOF_LVB) { in gfs2_glock_get()
1180 gl->gl_lksb.sb_lvbptr = kzalloc(GDLM_LVB_SIZE, GFP_NOFS); in gfs2_glock_get()
1181 if (!gl->gl_lksb.sb_lvbptr) { in gfs2_glock_get()
1182 gfs2_glock_dealloc(&gl->gl_rcu); in gfs2_glock_get()
1183 return -ENOMEM; in gfs2_glock_get()
1187 atomic_inc(&sdp->sd_glock_disposal); in gfs2_glock_get()
1188 gl->gl_node.next = NULL; in gfs2_glock_get()
1189 gl->gl_flags = glops->go_instantiate ? BIT(GLF_INSTANTIATE_NEEDED) : 0; in gfs2_glock_get()
1190 gl->gl_name = name; in gfs2_glock_get()
1191 lockdep_set_subclass(&gl->gl_lockref.lock, glops->go_subclass); in gfs2_glock_get()
1192 gl->gl_lockref.count = 1; in gfs2_glock_get()
1193 gl->gl_state = LM_ST_UNLOCKED; in gfs2_glock_get()
1194 gl->gl_target = LM_ST_UNLOCKED; in gfs2_glock_get()
1195 gl->gl_demote_state = LM_ST_EXCLUSIVE; in gfs2_glock_get()
1196 gl->gl_dstamp = 0; in gfs2_glock_get()
1198 /* We use the global stats to estimate the initial per-glock stats */ in gfs2_glock_get()
1199 gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type]; in gfs2_glock_get()
1201 gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0; in gfs2_glock_get()
1202 gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0; in gfs2_glock_get()
1203 gl->gl_tchange = jiffies; in gfs2_glock_get()
1204 gl->gl_object = NULL; in gfs2_glock_get()
1205 gl->gl_hold_time = GL_GLOCK_DFT_HOLD; in gfs2_glock_get()
1206 INIT_DELAYED_WORK(&gl->gl_work, glock_work_func); in gfs2_glock_get()
1207 if (gl->gl_name.ln_type == LM_TYPE_IOPEN) in gfs2_glock_get()
1208 INIT_DELAYED_WORK(&gl->gl_delete, delete_work_func); in gfs2_glock_get()
1212 mapping->a_ops = &gfs2_meta_aops; in gfs2_glock_get()
1213 mapping->host = s->s_bdev->bd_inode; in gfs2_glock_get()
1214 mapping->flags = 0; in gfs2_glock_get()
1216 mapping->private_data = NULL; in gfs2_glock_get()
1217 mapping->writeback_index = 0; in gfs2_glock_get()
1232 gfs2_glock_dealloc(&gl->gl_rcu); in gfs2_glock_get()
1233 if (atomic_dec_and_test(&sdp->sd_glock_disposal)) in gfs2_glock_get()
1234 wake_up(&sdp->sd_kill_wait); in gfs2_glock_get()
1241 * __gfs2_holder_init - initialize a struct gfs2_holder in the default way
1252 INIT_LIST_HEAD(&gh->gh_list); in __gfs2_holder_init()
1253 gh->gh_gl = gfs2_glock_hold(gl); in __gfs2_holder_init()
1254 gh->gh_ip = ip; in __gfs2_holder_init()
1255 gh->gh_owner_pid = get_pid(task_pid(current)); in __gfs2_holder_init()
1256 gh->gh_state = state; in __gfs2_holder_init()
1257 gh->gh_flags = flags; in __gfs2_holder_init()
1258 gh->gh_iflags = 0; in __gfs2_holder_init()
1262 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
1273 gh->gh_state = state; in gfs2_holder_reinit()
1274 gh->gh_flags = flags; in gfs2_holder_reinit()
1275 gh->gh_iflags = 0; in gfs2_holder_reinit()
1276 gh->gh_ip = _RET_IP_; in gfs2_holder_reinit()
1277 put_pid(gh->gh_owner_pid); in gfs2_holder_reinit()
1278 gh->gh_owner_pid = get_pid(task_pid(current)); in gfs2_holder_reinit()
1282 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
1289 put_pid(gh->gh_owner_pid); in gfs2_holder_uninit()
1290 gfs2_glock_put(gh->gh_gl); in gfs2_holder_uninit()
1292 gh->gh_ip = 0; in gfs2_holder_uninit()
1300 /* Lengthen the minimum hold time. */ in gfs2_glock_update_hold_time()
1301 gl->gl_hold_time = min(gl->gl_hold_time + GL_GLOCK_HOLD_INCR, in gfs2_glock_update_hold_time()
1307 * gfs2_glock_holder_ready - holder is ready and its error code can be collected
1317 if (gh->gh_error || (gh->gh_flags & GL_SKIP)) in gfs2_glock_holder_ready()
1318 return gh->gh_error; in gfs2_glock_holder_ready()
1319 gh->gh_error = gfs2_instantiate(gh); in gfs2_glock_holder_ready()
1320 if (gh->gh_error) in gfs2_glock_holder_ready()
1322 return gh->gh_error; in gfs2_glock_holder_ready()
1326 * gfs2_glock_wait - wait on a glock acquisition
1337 wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE); in gfs2_glock_wait()
1338 gfs2_glock_update_hold_time(gh->gh_gl, start_time); in gfs2_glock_wait()
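The fragments above cover the waiting side of a lock request. For reference, here is a minimal sketch (not part of the listing) of the usual synchronous acquire/release pattern built from gfs2_holder_init(), gfs2_glock_nq() and gfs2_glock_dq_uninit(); the function name and the work done under the lock are hypothetical, and without GL_ASYNC gfs2_glock_nq() ends up waiting in gfs2_glock_wait() shown above.

/*
 * Hypothetical caller, sketched for illustration only: take a glock in
 * shared mode, do some work, then drop it again.  gfs2_holder_init(),
 * gfs2_glock_nq(), gfs2_holder_uninit() and gfs2_glock_dq_uninit() are
 * the real gfs2 helpers; everything else here is made up.
 */
static int example_read_under_glock(struct gfs2_glock *gl)
{
	struct gfs2_holder gh;
	int error;

	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);	/* queues and, without GL_ASYNC, waits */
	if (error) {
		gfs2_holder_uninit(&gh);	/* drop the glock reference taken above */
		return error;
	}

	/* ... access data protected by the shared glock here ... */

	gfs2_glock_dq_uninit(&gh);	/* dequeue the holder and drop the reference */
	return 0;
}

In the tree itself the init-plus-enqueue step is often spelled with the gfs2_glock_nq_init() helper from glock.h, which uninitializes the holder for the caller if the enqueue fails.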
1353 * gfs2_glock_async_wait - wait on multiple asynchronous glock acquisitions
1358 * -ESTALE if the request timed out, meaning all glocks were released,
1364 struct gfs2_sbd *sdp = ghs[0].gh_gl->gl_name.ln_sbd; in gfs2_glock_async_wait()
1370 * Total up the (minimum hold time * 2) of all glocks and use that to in gfs2_glock_async_wait()
1371 * determine the max amount of time we should wait. in gfs2_glock_async_wait()
1374 timeout += ghs[i].gh_gl->gl_hold_time << 1; in gfs2_glock_async_wait()
1376 if (!wait_event_timeout(sdp->sd_async_glock_wait, in gfs2_glock_async_wait()
1378 ret = -ESTALE; /* request timed out. */ in gfs2_glock_async_wait()
1386 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) { in gfs2_glock_async_wait()
1387 gfs2_glock_update_hold_time(gh->gh_gl, in gfs2_glock_async_wait()
1407 * handle_callback - process a demote request
1421 set_bit(GLF_PENDING_DEMOTE, &gl->gl_flags); in handle_callback()
1424 if (gl->gl_demote_state == LM_ST_EXCLUSIVE) { in handle_callback()
1425 gl->gl_demote_state = state; in handle_callback()
1426 gl->gl_demote_time = jiffies; in handle_callback()
1427 } else if (gl->gl_demote_state != LM_ST_UNLOCKED && in handle_callback()
1428 gl->gl_demote_state != state) { in handle_callback()
1429 gl->gl_demote_state = LM_ST_UNLOCKED; in handle_callback()
1431 if (gl->gl_ops->go_callback) in handle_callback()
1432 gl->gl_ops->go_callback(gl, remote); in handle_callback()
1457 if (!(gh->gh_flags & GL_NOPID)) in pid_is_meaningful()
1459 if (gh->gh_state == LM_ST_UNLOCKED) in pid_is_meaningful()
1465 * add_to_queue - Add a holder to the wait queue (but look for recursion)
1475 __releases(&gl->gl_lockref.lock) in add_to_queue()
1476 __acquires(&gl->gl_lockref.lock) in add_to_queue()
1478 struct gfs2_glock *gl = gh->gh_gl; in add_to_queue()
1479 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in add_to_queue()
1484 GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL); in add_to_queue()
1485 if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags)) in add_to_queue()
1488 if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) { in add_to_queue()
1489 if (test_bit(GLF_LOCK, &gl->gl_flags)) { in add_to_queue()
1495 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) in add_to_queue()
1499 list_for_each_entry(gh2, &gl->gl_holders, gh_list) { in add_to_queue()
1500 if (likely(gh2->gh_owner_pid != gh->gh_owner_pid)) in add_to_queue()
1502 if (gh->gh_gl->gl_ops->go_type == LM_TYPE_FLOCK) in add_to_queue()
1508 list_for_each_entry(gh2, &gl->gl_holders, gh_list) { in add_to_queue()
1510 !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) { in add_to_queue()
1512 gh->gh_error = GLR_TRYFAILED; in add_to_queue()
1516 if (test_bit(HIF_HOLDER, &gh2->gh_iflags)) in add_to_queue()
1523 list_add_tail(&gh->gh_list, &gl->gl_holders); in add_to_queue()
1526 list_add_tail(&gh->gh_list, insert_pt); in add_to_queue()
1527 gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list); in add_to_queue()
1528 spin_unlock(&gl->gl_lockref.lock); in add_to_queue()
1529 if (sdp->sd_lockstruct.ls_ops->lm_cancel) in add_to_queue()
1530 sdp->sd_lockstruct.ls_ops->lm_cancel(gl); in add_to_queue()
1531 spin_lock(&gl->gl_lockref.lock); in add_to_queue()
1535 fs_err(sdp, "original: %pSR\n", (void *)gh2->gh_ip); in add_to_queue()
1536 fs_err(sdp, "pid: %d\n", pid_nr(gh2->gh_owner_pid)); in add_to_queue()
1538 gh2->gh_gl->gl_name.ln_type, gh2->gh_state); in add_to_queue()
1539 fs_err(sdp, "new: %pSR\n", (void *)gh->gh_ip); in add_to_queue()
1540 fs_err(sdp, "pid: %d\n", pid_nr(gh->gh_owner_pid)); in add_to_queue()
1542 gh->gh_gl->gl_name.ln_type, gh->gh_state); in add_to_queue()
1548 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
1551 * if (gh->gh_flags & GL_ASYNC), this never returns an error
1558 struct gfs2_glock *gl = gh->gh_gl; in gfs2_glock_nq()
1561 if (glock_blocked_by_withdraw(gl) && !(gh->gh_flags & LM_FLAG_NOEXP)) in gfs2_glock_nq()
1562 return -EIO; in gfs2_glock_nq()
1564 if (test_bit(GLF_LRU, &gl->gl_flags)) in gfs2_glock_nq()
1567 gh->gh_error = 0; in gfs2_glock_nq()
1568 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_nq()
1570 if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) && in gfs2_glock_nq()
1571 test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) { in gfs2_glock_nq()
1572 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); in gfs2_glock_nq()
1573 gl->gl_lockref.count++; in gfs2_glock_nq()
1577 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_nq()
1579 if (!(gh->gh_flags & GL_ASYNC)) in gfs2_glock_nq()
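gfs2_glock_nq() above also has an asynchronous path: with GL_ASYNC it only queues the holder and never returns an error, and the caller later collects the results with gfs2_glock_async_wait() (listed earlier) or polls with gfs2_glock_poll() below. The following is a hedged sketch of that multi-glock pattern, loosely modelled on how gfs2 takes several inode glocks at once; the two-glock function and its cleanup label are hypothetical.

/*
 * Hypothetical illustration of the GL_ASYNC pattern: queue two holders,
 * then wait for both requests with gfs2_glock_async_wait().  The real
 * helpers used are gfs2_holder_init(), gfs2_glock_nq(),
 * gfs2_glock_async_wait(), gfs2_holder_queued(), gfs2_glock_dq() and
 * gfs2_holder_uninit().
 */
static int example_lock_two_glocks(struct gfs2_glock *gl1, struct gfs2_glock *gl2)
{
	struct gfs2_holder ghs[2];
	unsigned int x;
	int error;

	gfs2_holder_init(gl1, LM_ST_EXCLUSIVE, GL_ASYNC, &ghs[0]);
	gfs2_holder_init(gl2, LM_ST_EXCLUSIVE, GL_ASYNC, &ghs[1]);
	gfs2_glock_nq(&ghs[0]);		/* never returns an error with GL_ASYNC */
	gfs2_glock_nq(&ghs[1]);

	error = gfs2_glock_async_wait(2, ghs);	/* 0, first error, or -ESTALE on timeout */
	if (error)
		goto out;

	/* ... both glocks are held exclusively here ... */

out:
	for (x = 0; x < 2; x++) {
		if (gfs2_holder_queued(&ghs[x]))
			gfs2_glock_dq(&ghs[x]);
		gfs2_holder_uninit(&ghs[x]);
	}
	return error;
}

The gfs2_holder_queued() check in the cleanup loop distinguishes holders that are still on the glock's queue from ones that have already been dequeued on the failure path, so the same cleanup works for both outcomes.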
1586 * gfs2_glock_poll - poll to see if an async request has been completed
1594 return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1; in gfs2_glock_poll()
1599 return (test_bit(GLF_DEMOTE, &gl->gl_flags) || in needs_demote()
1600 test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags)); in needs_demote()
1605 struct gfs2_glock *gl = gh->gh_gl; in __gfs2_glock_dq()
1614 if (gh->gh_flags & GL_NOCACHE) in __gfs2_glock_dq()
1617 list_del_init(&gh->gh_list); in __gfs2_glock_dq()
1618 clear_bit(HIF_HOLDER, &gh->gh_iflags); in __gfs2_glock_dq()
1626 if (list_empty(&gl->gl_holders)) in __gfs2_glock_dq()
1630 if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl)) in __gfs2_glock_dq()
1634 gl->gl_lockref.count++; in __gfs2_glock_dq()
1635 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && in __gfs2_glock_dq()
1636 !test_bit(GLF_DEMOTE, &gl->gl_flags) && in __gfs2_glock_dq()
1637 gl->gl_name.ln_type == LM_TYPE_INODE) in __gfs2_glock_dq()
1638 delay = gl->gl_hold_time; in __gfs2_glock_dq()
1644 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
1650 struct gfs2_glock *gl = gh->gh_gl; in gfs2_glock_dq()
1651 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_glock_dq()
1653 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_dq()
1662 if (list_is_first(&gh->gh_list, &gl->gl_holders) && in gfs2_glock_dq()
1663 !test_bit(HIF_HOLDER, &gh->gh_iflags)) { in gfs2_glock_dq()
1664 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_dq()
1665 gl->gl_name.ln_sbd->sd_lockstruct.ls_ops->lm_cancel(gl); in gfs2_glock_dq()
1666 wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE); in gfs2_glock_dq()
1667 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_dq()
1677 if (test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags) && in gfs2_glock_dq()
1679 gh->gh_gl != sdp->sd_jinode_gl) { in gfs2_glock_dq()
1680 sdp->sd_glock_dqs_held++; in gfs2_glock_dq()
1681 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_dq()
1683 wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY, in gfs2_glock_dq()
1685 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_dq()
1690 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_dq()
1695 struct gfs2_glock *gl = gh->gh_gl; in gfs2_glock_dq_wait()
1698 wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE); in gfs2_glock_dq_wait()
1702 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
1714 * gfs2_glock_nq_num - acquire a glock based on lock number
1719 * @flags: modifier flags for the acquisition
1742 * glock_compare - Compare two struct gfs2_glock structures for sorting
1752 const struct lm_lockname *a = &gh_a->gh_gl->gl_name; in glock_compare()
1753 const struct lm_lockname *b = &gh_b->gh_gl->gl_name; in glock_compare()
1755 if (a->ln_number > b->ln_number) in glock_compare()
1757 if (a->ln_number < b->ln_number) in glock_compare()
1758 return -1; in glock_compare()
1759 BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type); in glock_compare()
1764 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
1787 while (x--) in nq_m_sync()
1797 * gfs2_glock_nq_m - acquire multiple glocks
1822 return -ENOMEM; in gfs2_glock_nq_m()
1834 * gfs2_glock_dq_m - release multiple glocks
1842 while (num_gh--) in gfs2_glock_dq_m()
1853 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_cb()
1854 holdtime = gl->gl_tchange + gl->gl_hold_time; in gfs2_glock_cb()
1855 if (!list_empty(&gl->gl_holders) && in gfs2_glock_cb()
1856 gl->gl_name.ln_type == LM_TYPE_INODE) { in gfs2_glock_cb()
1858 delay = holdtime - now; in gfs2_glock_cb()
1859 if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags)) in gfs2_glock_cb()
1860 delay = gl->gl_hold_time; in gfs2_glock_cb()
1864 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_cb()
1868 * gfs2_should_freeze - Figure out if glock should be frozen
1882 if (gl->gl_reply & ~LM_OUT_ST_MASK) in gfs2_should_freeze()
1884 if (gl->gl_target == LM_ST_UNLOCKED) in gfs2_should_freeze()
1887 list_for_each_entry(gh, &gl->gl_holders, gh_list) { in gfs2_should_freeze()
1888 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) in gfs2_should_freeze()
1890 if (LM_FLAG_NOEXP & gh->gh_flags) in gfs2_should_freeze()
1898 * gfs2_glock_complete - Callback used by locking
1908 struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; in gfs2_glock_complete()
1910 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_complete()
1911 gl->gl_reply = ret; in gfs2_glock_complete()
1913 if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) { in gfs2_glock_complete()
1915 set_bit(GLF_FROZEN, &gl->gl_flags); in gfs2_glock_complete()
1916 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_complete()
1921 gl->gl_lockref.count++; in gfs2_glock_complete()
1922 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); in gfs2_glock_complete()
1924 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_complete()
1935 if (gla->gl_name.ln_number > glb->gl_name.ln_number) in glock_cmp()
1937 if (gla->gl_name.ln_number < glb->gl_name.ln_number) in glock_cmp()
1938 return -1; in glock_cmp()
1944 * gfs2_dispose_glock_lru - Demote a list of glocks
1967 list_del_init(&gl->gl_lru); in gfs2_dispose_glock_lru()
1968 clear_bit(GLF_LRU, &gl->gl_flags); in gfs2_dispose_glock_lru()
1969 if (!spin_trylock(&gl->gl_lockref.lock)) { in gfs2_dispose_glock_lru()
1971 list_add(&gl->gl_lru, &lru_list); in gfs2_dispose_glock_lru()
1972 set_bit(GLF_LRU, &gl->gl_flags); in gfs2_dispose_glock_lru()
1976 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { in gfs2_dispose_glock_lru()
1977 spin_unlock(&gl->gl_lockref.lock); in gfs2_dispose_glock_lru()
1980 gl->gl_lockref.count++; in gfs2_dispose_glock_lru()
1983 WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags)); in gfs2_dispose_glock_lru()
1985 spin_unlock(&gl->gl_lockref.lock); in gfs2_dispose_glock_lru()
1991 * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote
2007 if (nr-- <= 0) in gfs2_scan_glock_lru()
2010 if (!test_bit(GLF_LOCK, &gl->gl_flags)) { in gfs2_scan_glock_lru()
2011 if (!spin_trylock(&gl->gl_lockref.lock)) in gfs2_scan_glock_lru()
2013 if (gl->gl_lockref.count <= 1 && in gfs2_scan_glock_lru()
2014 (gl->gl_state == LM_ST_UNLOCKED || in gfs2_scan_glock_lru()
2016 list_move(&gl->gl_lru, &dispose); in gfs2_scan_glock_lru()
2020 spin_unlock(&gl->gl_lockref.lock); in gfs2_scan_glock_lru()
2033 if (!(sc->gfp_mask & __GFP_FS)) in gfs2_glock_shrink_scan()
2035 return gfs2_scan_glock_lru(sc->nr_to_scan); in gfs2_glock_shrink_scan()
2051 * glock_hash_walk - Call a function for glock in a hash bucket
2071 if (gl->gl_name.ln_sbd == sdp) in glock_hash_walk()
2076 } while (cond_resched(), gl == ERR_PTR(-EAGAIN)); in glock_hash_walk()
2083 clear_bit(GLF_TRY_TO_EVICT, &gl->gl_flags); in gfs2_cancel_delete_work()
2084 clear_bit(GLF_VERIFY_EVICT, &gl->gl_flags); in gfs2_cancel_delete_work()
2085 if (cancel_delayed_work(&gl->gl_delete)) in gfs2_cancel_delete_work()
2091 if (gl->gl_name.ln_type == LM_TYPE_IOPEN) { in flush_delete_work()
2092 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in flush_delete_work()
2094 if (cancel_delayed_work(&gl->gl_delete)) { in flush_delete_work()
2095 queue_delayed_work(sdp->sd_delete_wq, in flush_delete_work()
2096 &gl->gl_delete, 0); in flush_delete_work()
2104 flush_workqueue(sdp->sd_delete_wq); in gfs2_flush_delete_work()
2108 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
2115 if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) in thaw_glock()
2117 if (!lockref_get_not_dead(&gl->gl_lockref)) in thaw_glock()
2119 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); in thaw_glock()
2124 * clear_glock - look at a glock and see if we can free it from glock cache
2133 spin_lock(&gl->gl_lockref.lock); in clear_glock()
2134 if (!__lockref_is_dead(&gl->gl_lockref)) { in clear_glock()
2135 gl->gl_lockref.count++; in clear_glock()
2136 if (gl->gl_state != LM_ST_UNLOCKED) in clear_glock()
2140 spin_unlock(&gl->gl_lockref.lock); in clear_glock()
2144 * gfs2_glock_thaw - Thaw any frozen glocks
2156 spin_lock(&gl->gl_lockref.lock); in dump_glock()
2158 spin_unlock(&gl->gl_lockref.lock); in dump_glock()
2168 spin_lock(&gl->gl_lockref.lock); in withdraw_dq()
2169 if (!__lockref_is_dead(&gl->gl_lockref) && in withdraw_dq()
2172 spin_unlock(&gl->gl_lockref.lock); in withdraw_dq()
2181 * gfs2_gl_hash_clear - Empty out the glock hash table
2189 set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags); in gfs2_gl_hash_clear()
2193 wait_event_timeout(sdp->sd_kill_wait, in gfs2_gl_hash_clear()
2194 atomic_read(&sdp->sd_glock_disposal) == 0, in gfs2_gl_hash_clear()
2244 * dump_holder - print information about a glock holder
2263 owner_pid = pid_nr(gh->gh_owner_pid); in dump_holder()
2264 gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID); in dump_holder()
2266 comm = gh_owner->comm; in dump_holder()
2269 fs_id_buf, state2str(gh->gh_state), in dump_holder()
2270 hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags), in dump_holder()
2271 gh->gh_error, (long)owner_pid, comm, (void *)gh->gh_ip); in dump_holder()
2277 const unsigned long *gflags = &gl->gl_flags; in gflags2str()
2300 if (!list_empty(&gl->gl_holders)) in gflags2str()
2304 if (gl->gl_object) in gflags2str()
2323 * gfs2_dump_glock - print information about a glock
2342 const struct gfs2_glock_operations *glops = gl->gl_ops; in gfs2_dump_glock()
2346 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_dump_glock()
2347 char fs_id_buf[sizeof(sdp->sd_fsname) + 7]; in gfs2_dump_glock()
2350 if (gl->gl_ops->go_flags & GLOF_ASPACE) { in gfs2_dump_glock()
2353 nrpages = mapping->nrpages; in gfs2_dump_glock()
2357 sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname); in gfs2_dump_glock()
2358 dtime = jiffies - gl->gl_demote_time; in gfs2_dump_glock()
2359 dtime *= 1000000/HZ; /* demote time in uSec */ in gfs2_dump_glock()
2360 if (!test_bit(GLF_DEMOTE, &gl->gl_flags)) in gfs2_dump_glock()
2364 fs_id_buf, state2str(gl->gl_state), in gfs2_dump_glock()
2365 gl->gl_name.ln_type, in gfs2_dump_glock()
2366 (unsigned long long)gl->gl_name.ln_number, in gfs2_dump_glock()
2368 state2str(gl->gl_target), in gfs2_dump_glock()
2369 state2str(gl->gl_demote_state), dtime, in gfs2_dump_glock()
2370 atomic_read(&gl->gl_ail_count), in gfs2_dump_glock()
2371 atomic_read(&gl->gl_revokes), in gfs2_dump_glock()
2372 (int)gl->gl_lockref.count, gl->gl_hold_time, nrpages); in gfs2_dump_glock()
2374 list_for_each_entry(gh, &gl->gl_holders, gh_list) in gfs2_dump_glock()
2377 if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump) in gfs2_dump_glock()
2378 glops->go_dump(seq, gl, fs_id_buf); in gfs2_dump_glock()
2386 gl->gl_name.ln_type, in gfs2_glstats_seq_show()
2387 (unsigned long long)gl->gl_name.ln_number, in gfs2_glstats_seq_show()
2388 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT], in gfs2_glstats_seq_show()
2389 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR], in gfs2_glstats_seq_show()
2390 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB], in gfs2_glstats_seq_show()
2391 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB], in gfs2_glstats_seq_show()
2392 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT], in gfs2_glstats_seq_show()
2393 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR], in gfs2_glstats_seq_show()
2394 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT], in gfs2_glstats_seq_show()
2395 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]); in gfs2_glstats_seq_show()
2428 struct gfs2_sbd *sdp = seq->private; in gfs2_sbstats_seq_show()
2437 seq_printf(seq, "%-10s %8s:", gfs2_gltype[index], in gfs2_sbstats_seq_show()
2441 const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i); in gfs2_sbstats_seq_show()
2446 seq_printf(seq, " %15llu", (unsigned long long)lkstats-> in gfs2_sbstats_seq_show()
2447 lkstats[index - 1].stats[subindex]); in gfs2_sbstats_seq_show()
2465 return -ENOMEM; in gfs2_glock_init()
2468 ret = register_shrinker(&glock_shrinker, "gfs2-glock"); in gfs2_glock_init()
2490 struct gfs2_glock *gl = gi->gl; in gfs2_glock_iter_next()
2495 if (!lockref_put_not_zero(&gl->gl_lockref)) in gfs2_glock_iter_next()
2499 gl = rhashtable_walk_next(&gi->hti); in gfs2_glock_iter_next()
2501 if (gl == ERR_PTR(-EAGAIN)) { in gfs2_glock_iter_next()
2508 if (gl->gl_name.ln_sbd != gi->sdp) in gfs2_glock_iter_next()
2511 if (!lockref_get_not_dead(&gl->gl_lockref)) in gfs2_glock_iter_next()
2515 if (__lockref_is_dead(&gl->gl_lockref)) in gfs2_glock_iter_next()
2517 n--; in gfs2_glock_iter_next()
2520 gi->gl = gl; in gfs2_glock_iter_next()
2526 struct gfs2_glock_iter *gi = seq->private; in gfs2_glock_seq_start()
2533 if (*pos < gi->last_pos) { in gfs2_glock_seq_start()
2534 rhashtable_walk_exit(&gi->hti); in gfs2_glock_seq_start()
2535 rhashtable_walk_enter(&gl_hash_table, &gi->hti); in gfs2_glock_seq_start()
2538 n = *pos - gi->last_pos; in gfs2_glock_seq_start()
2541 rhashtable_walk_start(&gi->hti); in gfs2_glock_seq_start()
2544 gi->last_pos = *pos; in gfs2_glock_seq_start()
2545 return gi->gl; in gfs2_glock_seq_start()
2551 struct gfs2_glock_iter *gi = seq->private; in gfs2_glock_seq_next()
2554 gi->last_pos = *pos; in gfs2_glock_seq_next()
2556 return gi->gl; in gfs2_glock_seq_next()
2562 struct gfs2_glock_iter *gi = seq->private; in gfs2_glock_seq_stop()
2564 rhashtable_walk_stop(&gi->hti); in gfs2_glock_seq_stop()
2623 struct seq_file *seq = file->private_data; in __gfs2_glocks_open()
2624 struct gfs2_glock_iter *gi = seq->private; in __gfs2_glocks_open()
2626 gi->sdp = inode->i_private; in __gfs2_glocks_open()
2627 seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN); in __gfs2_glocks_open()
2628 if (seq->buf) in __gfs2_glocks_open()
2629 seq->size = GFS2_SEQ_GOODSIZE; in __gfs2_glocks_open()
2634 gi->last_pos = -1; in __gfs2_glocks_open()
2635 gi->gl = NULL; in __gfs2_glocks_open()
2636 rhashtable_walk_enter(&gl_hash_table, &gi->hti); in __gfs2_glocks_open()
2648 struct seq_file *seq = file->private_data; in gfs2_glocks_release()
2649 struct gfs2_glock_iter *gi = seq->private; in gfs2_glocks_release()
2651 if (gi->gl) in gfs2_glocks_release()
2652 gfs2_glock_put(gi->gl); in gfs2_glocks_release()
2653 rhashtable_walk_exit(&gi->hti); in gfs2_glocks_release()
2688 struct pid_namespace *ns = task_active_pid_ns(current); in gfs2_glockfd_next_task() local
2691 if (i->task) in gfs2_glockfd_next_task()
2692 put_task_struct(i->task); in gfs2_glockfd_next_task()
2696 i->task = NULL; in gfs2_glockfd_next_task()
2697 pid = find_ge_pid(i->tgid, ns); in gfs2_glockfd_next_task()
2699 i->tgid = pid_nr_ns(pid, ns); in gfs2_glockfd_next_task()
2700 i->task = pid_task(pid, PIDTYPE_TGID); in gfs2_glockfd_next_task()
2701 if (!i->task) { in gfs2_glockfd_next_task()
2702 i->tgid++; in gfs2_glockfd_next_task()
2705 get_task_struct(i->task); in gfs2_glockfd_next_task()
2708 return i->task; in gfs2_glockfd_next_task()
2713 if (i->file) { in gfs2_glockfd_next_file()
2714 fput(i->file); in gfs2_glockfd_next_file()
2715 i->file = NULL; in gfs2_glockfd_next_file()
2719 for(;; i->fd++) { in gfs2_glockfd_next_file()
2722 i->file = task_lookup_next_fd_rcu(i->task, &i->fd); in gfs2_glockfd_next_file()
2723 if (!i->file) { in gfs2_glockfd_next_file()
2724 i->fd = 0; in gfs2_glockfd_next_file()
2727 inode = file_inode(i->file); in gfs2_glockfd_next_file()
2728 if (inode->i_sb != i->sb) in gfs2_glockfd_next_file()
2730 if (get_file_rcu(i->file)) in gfs2_glockfd_next_file()
2734 return i->file; in gfs2_glockfd_next_file()
2739 struct gfs2_glockfd_iter *i = seq->private; in gfs2_glockfd_seq_start()
2746 i->tgid++; in gfs2_glockfd_seq_start()
2754 struct gfs2_glockfd_iter *i = seq->private; in gfs2_glockfd_seq_next()
2757 i->fd++; in gfs2_glockfd_seq_next()
2761 i->tgid++; in gfs2_glockfd_seq_next()
2768 struct gfs2_glockfd_iter *i = seq->private; in gfs2_glockfd_seq_stop()
2770 if (i->file) in gfs2_glockfd_seq_stop()
2771 fput(i->file); in gfs2_glockfd_seq_stop()
2772 if (i->task) in gfs2_glockfd_seq_stop()
2773 put_task_struct(i->task); in gfs2_glockfd_seq_stop()
2779 struct gfs2_file *fp = i->file->private_data; in gfs2_glockfd_seq_show_flock()
2780 struct gfs2_holder *fl_gh = &fp->f_fl_gh; in gfs2_glockfd_seq_show_flock()
2783 if (!READ_ONCE(fl_gh->gh_gl)) in gfs2_glockfd_seq_show_flock()
2786 spin_lock(&i->file->f_lock); in gfs2_glockfd_seq_show_flock()
2788 gl_name = fl_gh->gh_gl->gl_name; in gfs2_glockfd_seq_show_flock()
2789 spin_unlock(&i->file->f_lock); in gfs2_glockfd_seq_show_flock()
2793 i->tgid, i->fd, gl_name.ln_type, in gfs2_glockfd_seq_show_flock()
2800 struct gfs2_glockfd_iter *i = seq->private; in gfs2_glockfd_seq_show()
2801 struct inode *inode = file_inode(i->file); in gfs2_glockfd_seq_show()
2805 gl = GFS2_I(inode)->i_iopen_gh.gh_gl; in gfs2_glockfd_seq_show()
2808 i->tgid, i->fd, gl->gl_name.ln_type, in gfs2_glockfd_seq_show()
2809 (unsigned long long)gl->gl_name.ln_number); in gfs2_glockfd_seq_show()
2826 struct gfs2_sbd *sdp = inode->i_private; in gfs2_glockfd_open()
2831 return -ENOMEM; in gfs2_glockfd_open()
2832 i->sb = sdp->sd_vfs; in gfs2_glockfd_open()
2848 sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root); in gfs2_create_debugfs_file()
2850 debugfs_create_file("glocks", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp, in gfs2_create_debugfs_file()
2853 debugfs_create_file("glockfd", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp, in gfs2_create_debugfs_file()
2856 debugfs_create_file("glstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp, in gfs2_create_debugfs_file()
2859 debugfs_create_file("sbstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp, in gfs2_create_debugfs_file()
2865 debugfs_remove_recursive(sdp->debugfs_dir); in gfs2_delete_debugfs_file()
2866 sdp->debugfs_dir = NULL; in gfs2_delete_debugfs_file()