Lines matching references to gh (struct gfs2_holder) in fs/gfs2/glock.c

63 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
64 static void __gfs2_glock_dq(struct gfs2_holder *gh);
333 struct gfs2_holder *gh) in may_grant() argument
347 return gh->gh_state == LM_ST_EXCLUSIVE && in may_grant()
349 (gh->gh_flags & LM_FLAG_NODE_SCOPE); in may_grant()
353 return gh->gh_state == current_gh->gh_state; in may_grant()
360 if (gl->gl_state == gh->gh_state) in may_grant()
362 if (gh->gh_flags & GL_EXACT) in may_grant()
365 return gh->gh_state == LM_ST_SHARED || in may_grant()
366 gh->gh_state == LM_ST_DEFERRED; in may_grant()
368 if (gh->gh_flags & LM_FLAG_ANY) in may_grant()
373 static void gfs2_holder_wake(struct gfs2_holder *gh) in gfs2_holder_wake() argument
375 clear_bit(HIF_WAIT, &gh->gh_iflags); in gfs2_holder_wake()
377 wake_up_bit(&gh->gh_iflags, HIF_WAIT); in gfs2_holder_wake()
378 if (gh->gh_flags & GL_ASYNC) { in gfs2_holder_wake()
379 struct gfs2_sbd *sdp = gh->gh_gl->gl_name.ln_sbd; in gfs2_holder_wake()
393 struct gfs2_holder *gh, *tmp; in do_error() local
395 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { in do_error()
396 if (!test_bit(HIF_WAIT, &gh->gh_iflags)) in do_error()
399 gh->gh_error = -EIO; in do_error()
400 else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) in do_error()
401 gh->gh_error = GLR_TRYFAILED; in do_error()
404 list_del_init(&gh->gh_list); in do_error()
405 trace_gfs2_glock_queue(gh, 0); in do_error()
406 gfs2_holder_wake(gh); in do_error()
421 struct gfs2_holder *gh, *tmp; in demote_incompat_holders() local
428 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { in demote_incompat_holders()
433 if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) in demote_incompat_holders()
435 if (gh == current_gh) in demote_incompat_holders()
437 if (test_bit(HIF_MAY_DEMOTE, &gh->gh_iflags) && in demote_incompat_holders()
438 !may_grant(gl, current_gh, gh)) { in demote_incompat_holders()
444 __gfs2_glock_dq(gh); in demote_incompat_holders()
456 struct gfs2_holder *gh; in find_first_holder() local
459 gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, in find_first_holder()
461 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) in find_first_holder()
462 return gh; in find_first_holder()
476 struct gfs2_holder *gh; in find_first_strong_holder() local
478 list_for_each_entry(gh, &gl->gl_holders, gh_list) { in find_first_strong_holder()
479 if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) in find_first_strong_holder()
481 if (!test_bit(HIF_MAY_DEMOTE, &gh->gh_iflags)) in find_first_strong_holder()
482 return gh; in find_first_strong_holder()
493 int gfs2_instantiate(struct gfs2_holder *gh) in gfs2_instantiate() argument
495 struct gfs2_glock *gl = gh->gh_gl; in gfs2_instantiate()
529 return glops->go_held(gh); in gfs2_instantiate()
542 struct gfs2_holder *gh, *current_gh; in do_promote() local
546 list_for_each_entry(gh, &gl->gl_holders, gh_list) { in do_promote()
547 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) in do_promote()
549 if (!may_grant(gl, current_gh, gh)) { in do_promote()
556 if (list_is_first(&gh->gh_list, &gl->gl_holders)) in do_promote()
561 set_bit(HIF_HOLDER, &gh->gh_iflags); in do_promote()
562 trace_gfs2_promote(gh); in do_promote()
563 gfs2_holder_wake(gh); in do_promote()
565 current_gh = gh; in do_promote()
580 struct gfs2_holder *gh; in find_first_waiter() local
582 list_for_each_entry(gh, &gl->gl_holders, gh_list) { in find_first_waiter()
583 if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) in find_first_waiter()
584 return gh; in find_first_waiter()
644 struct gfs2_holder *gh; in finish_xmote() local
650 gh = find_first_waiter(gl); in finish_xmote()
659 if (gh && (ret & LM_OUT_CANCELED)) in finish_xmote()
660 gfs2_holder_wake(gh); in finish_xmote()
661 if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) { in finish_xmote()
664 if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0) in finish_xmote()
665 list_move_tail(&gh->gh_list, &gl->gl_holders); in finish_xmote()
666 gh = find_first_waiter(gl); in finish_xmote()
667 gl->gl_target = gh->gh_state; in finish_xmote()
672 (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) { in finish_xmote()
682 do_xmote(gl, gh, gl->gl_target); in finish_xmote()
687 do_xmote(gl, gh, LM_ST_UNLOCKED); in finish_xmote()
738 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, in do_xmote() argument
745 unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0); in do_xmote()
749 gh && !(gh->gh_flags & LM_FLAG_NOEXP)) in do_xmote()
888 struct gfs2_holder *gh = NULL; in run_queue() local
909 gh = find_first_waiter(gl); in run_queue()
910 gl->gl_target = gh->gh_state; in run_queue()
911 if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) in run_queue()
914 do_xmote(gl, gh, gl->gl_target); in run_queue()
952 struct gfs2_holder gh; in gfs2_glock_poke() local
955 __gfs2_holder_init(gl, LM_ST_SHARED, flags, &gh, _RET_IP_); in gfs2_glock_poke()
956 error = gfs2_glock_nq(&gh); in gfs2_glock_poke()
958 gfs2_glock_dq(&gh); in gfs2_glock_poke()
959 gfs2_holder_uninit(&gh); in gfs2_glock_poke()
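
The gfs2_glock_poke() hits above (source lines 952-959) show the full holder lifecycle in one place: initialize a holder, enqueue it, dequeue it, release it. A minimal sketch of that pattern, assuming only the glock API seen in this listing plus the glock.h wrapper gfs2_holder_init(), which supplies _RET_IP_ to __gfs2_holder_init(); the function below and its gl argument are purely illustrative:

static int example_with_shared_glock(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;
        int error;

        /*
         * Fill in the holder (requested state and flags); this also takes
         * a glock reference, put again by gfs2_holder_uninit() (line 1297).
         */
        gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);

        /* Enqueue the holder; without GL_ASYNC this waits until granted. */
        error = gfs2_glock_nq(&gh);
        if (error)
                goto out_uninit;

        /* ... the glock is held in LM_ST_SHARED here ... */

        gfs2_glock_dq(&gh);             /* drop the holder */
out_uninit:
        gfs2_holder_uninit(&gh);        /* put the glock reference */
        return error;
}
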
1256 struct gfs2_holder *gh, unsigned long ip) in __gfs2_holder_init() argument
1258 INIT_LIST_HEAD(&gh->gh_list); in __gfs2_holder_init()
1259 gh->gh_gl = gl; in __gfs2_holder_init()
1260 gh->gh_ip = ip; in __gfs2_holder_init()
1261 gh->gh_owner_pid = get_pid(task_pid(current)); in __gfs2_holder_init()
1262 gh->gh_state = state; in __gfs2_holder_init()
1263 gh->gh_flags = flags; in __gfs2_holder_init()
1264 gh->gh_iflags = 0; in __gfs2_holder_init()
1278 void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh) in gfs2_holder_reinit() argument
1280 gh->gh_state = state; in gfs2_holder_reinit()
1281 gh->gh_flags = flags; in gfs2_holder_reinit()
1282 gh->gh_iflags = 0; in gfs2_holder_reinit()
1283 gh->gh_ip = _RET_IP_; in gfs2_holder_reinit()
1284 put_pid(gh->gh_owner_pid); in gfs2_holder_reinit()
1285 gh->gh_owner_pid = get_pid(task_pid(current)); in gfs2_holder_reinit()
1294 void gfs2_holder_uninit(struct gfs2_holder *gh) in gfs2_holder_uninit() argument
1296 put_pid(gh->gh_owner_pid); in gfs2_holder_uninit()
1297 gfs2_glock_put(gh->gh_gl); in gfs2_holder_uninit()
1298 gfs2_holder_mark_uninitialized(gh); in gfs2_holder_uninit()
1299 gh->gh_ip = 0; in gfs2_holder_uninit()
1322 int gfs2_glock_holder_ready(struct gfs2_holder *gh) in gfs2_glock_holder_ready() argument
1324 if (gh->gh_error || (gh->gh_flags & GL_SKIP)) in gfs2_glock_holder_ready()
1325 return gh->gh_error; in gfs2_glock_holder_ready()
1326 gh->gh_error = gfs2_instantiate(gh); in gfs2_glock_holder_ready()
1327 if (gh->gh_error) in gfs2_glock_holder_ready()
1328 gfs2_glock_dq(gh); in gfs2_glock_holder_ready()
1329 return gh->gh_error; in gfs2_glock_holder_ready()
1339 int gfs2_glock_wait(struct gfs2_holder *gh) in gfs2_glock_wait() argument
1344 wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE); in gfs2_glock_wait()
1345 gfs2_glock_update_hold_time(gh->gh_gl, start_time); in gfs2_glock_wait()
1346 return gfs2_glock_holder_ready(gh); in gfs2_glock_wait()
1390 struct gfs2_holder *gh = &ghs[i]; in gfs2_glock_async_wait() local
1393 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) { in gfs2_glock_async_wait()
1394 gfs2_glock_update_hold_time(gh->gh_gl, in gfs2_glock_async_wait()
1397 ret2 = gfs2_glock_holder_ready(gh); in gfs2_glock_async_wait()
1405 struct gfs2_holder *gh = &ghs[i]; in gfs2_glock_async_wait() local
1407 gfs2_glock_dq(gh); in gfs2_glock_async_wait()
1462 static inline bool pid_is_meaningful(const struct gfs2_holder *gh) in pid_is_meaningful() argument
1464 if (!(gh->gh_flags & GL_NOPID)) in pid_is_meaningful()
1466 if (gh->gh_state == LM_ST_UNLOCKED) in pid_is_meaningful()
1481 static inline void add_to_queue(struct gfs2_holder *gh) in add_to_queue() argument
1485 struct gfs2_glock *gl = gh->gh_gl; in add_to_queue()
1491 GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL); in add_to_queue()
1492 if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags)) in add_to_queue()
1495 if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) { in add_to_queue()
1500 try_futile = !may_grant(gl, current_gh, gh); in add_to_queue()
1507 if (likely(gh2->gh_owner_pid != gh->gh_owner_pid)) in add_to_queue()
1509 if (gh->gh_gl->gl_ops->go_type == LM_TYPE_FLOCK) in add_to_queue()
1521 gh->gh_error = GLR_TRYFAILED; in add_to_queue()
1522 gfs2_holder_wake(gh); in add_to_queue()
1527 if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt)) in add_to_queue()
1530 trace_gfs2_glock_queue(gh, 1); in add_to_queue()
1534 list_add_tail(&gh->gh_list, &gl->gl_holders); in add_to_queue()
1535 if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY)) in add_to_queue()
1539 list_add_tail(&gh->gh_list, insert_pt); in add_to_queue()
1541 gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list); in add_to_queue()
1542 if (!(gh->gh_flags & LM_FLAG_PRIORITY)) { in add_to_queue()
1555 fs_err(sdp, "new: %pSR\n", (void *)gh->gh_ip); in add_to_queue()
1556 fs_err(sdp, "pid: %d\n", pid_nr(gh->gh_owner_pid)); in add_to_queue()
1558 gh->gh_gl->gl_name.ln_type, gh->gh_state); in add_to_queue()
1572 int gfs2_glock_nq(struct gfs2_holder *gh) in gfs2_glock_nq() argument
1574 struct gfs2_glock *gl = gh->gh_gl; in gfs2_glock_nq()
1577 if (glock_blocked_by_withdraw(gl) && !(gh->gh_flags & LM_FLAG_NOEXP)) in gfs2_glock_nq()
1583 gh->gh_error = 0; in gfs2_glock_nq()
1585 add_to_queue(gh); in gfs2_glock_nq()
1586 if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) && in gfs2_glock_nq()
1595 if (!(gh->gh_flags & GL_ASYNC)) in gfs2_glock_nq()
1596 error = gfs2_glock_wait(gh); in gfs2_glock_nq()
1608 int gfs2_glock_poll(struct gfs2_holder *gh) in gfs2_glock_poll() argument
1610 return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1; in gfs2_glock_poll()
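
Taken together, the gfs2_glock_nq(), gfs2_glock_wait() and gfs2_glock_poll() hits above describe the asynchronous variant of the same lifecycle: with GL_ASYNC, gfs2_glock_nq() does not call gfs2_glock_wait() itself (source line 1595), so the caller polls or waits for HIF_WAIT to clear later. A hedged sketch of that pattern; the function and its arguments are hypothetical:

static int example_async_glock(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;
        int error;

        gfs2_holder_init(gl, LM_ST_EXCLUSIVE, GL_ASYNC, &gh);

        /* Queue the holder; GL_ASYNC makes this return without waiting. */
        error = gfs2_glock_nq(&gh);
        if (error)
                goto out_uninit;

        /*
         * ... overlap other work; gfs2_glock_poll(&gh) returns nonzero
         * once HIF_WAIT is clear, i.e. the request has completed ...
         */

        /* Collect the result: wait on HIF_WAIT, then return gh_error. */
        error = gfs2_glock_wait(&gh);
        if (error)
                goto out_uninit;  /* a failed holder is already dequeued */

        gfs2_glock_dq(&gh);
out_uninit:
        gfs2_holder_uninit(&gh);
        return error;
}
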
1619 static void __gfs2_glock_dq(struct gfs2_holder *gh) in __gfs2_glock_dq() argument
1621 struct gfs2_glock *gl = gh->gh_gl; in __gfs2_glock_dq()
1632 while (gh) { in __gfs2_glock_dq()
1642 gh->gh_gl != sdp->sd_jinode_gl) { in __gfs2_glock_dq()
1656 if (gh->gh_flags & GL_NOCACHE) in __gfs2_glock_dq()
1659 list_del_init(&gh->gh_list); in __gfs2_glock_dq()
1660 clear_bit(HIF_HOLDER, &gh->gh_iflags); in __gfs2_glock_dq()
1661 trace_gfs2_glock_queue(gh, 0); in __gfs2_glock_dq()
1684 gh = find_first_holder(gl); in __gfs2_glock_dq()
1705 void gfs2_glock_dq(struct gfs2_holder *gh) in gfs2_glock_dq() argument
1707 struct gfs2_glock *gl = gh->gh_gl; in gfs2_glock_dq()
1710 if (list_is_first(&gh->gh_list, &gl->gl_holders) && in gfs2_glock_dq()
1711 !test_bit(HIF_HOLDER, &gh->gh_iflags)) { in gfs2_glock_dq()
1714 wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE); in gfs2_glock_dq()
1718 __gfs2_glock_dq(gh); in gfs2_glock_dq()
1722 void gfs2_glock_dq_wait(struct gfs2_holder *gh) in gfs2_glock_dq_wait() argument
1724 struct gfs2_glock *gl = gh->gh_gl; in gfs2_glock_dq_wait()
1725 gfs2_glock_dq(gh); in gfs2_glock_dq_wait()
1736 void gfs2_glock_dq_uninit(struct gfs2_holder *gh) in gfs2_glock_dq_uninit() argument
1738 gfs2_glock_dq(gh); in gfs2_glock_dq_uninit()
1739 gfs2_holder_uninit(gh); in gfs2_glock_dq_uninit()
1756 unsigned int state, u16 flags, struct gfs2_holder *gh) in gfs2_glock_nq_num() argument
1763 error = gfs2_glock_nq_init(gl, state, flags, gh); in gfs2_glock_nq_num()
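
The gfs2_glock_nq_num() hit above calls gfs2_glock_nq_init() (source line 1763), and gfs2_glock_dq_uninit() is defined at source line 1736. Assuming their usual glock.h definitions, these fold the init+enqueue and dequeue+uninit pairs into single calls, with gfs2_glock_nq_init() dropping the holder again if the enqueue fails. A brief hedged sketch (example function and gl are hypothetical):

static int example_combined_helpers(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;
        int error;

        /* holder_init + glock_nq; uninits the holder on failure. */
        error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);
        if (error)
                return error;

        /* ... glock held in LM_ST_SHARED ... */

        gfs2_glock_dq_uninit(&gh);      /* glock_dq + holder_uninit */
        return 0;
}
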
1936 const struct gfs2_holder *gh; in gfs2_should_freeze() local
1943 list_for_each_entry(gh, &gl->gl_holders, gh_list) { in gfs2_should_freeze()
1944 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) in gfs2_should_freeze()
1946 if (LM_FLAG_NOEXP & gh->gh_flags) in gfs2_should_freeze()
2325 static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh, in dump_holder() argument
2333 if (pid_is_meaningful(gh)) { in dump_holder()
2337 owner_pid = pid_nr(gh->gh_owner_pid); in dump_holder()
2338 gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID); in dump_holder()
2343 fs_id_buf, state2str(gh->gh_state), in dump_holder()
2344 hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags), in dump_holder()
2345 gh->gh_error, (long)owner_pid, comm, (void *)gh->gh_ip); in dump_holder()
2416 const struct gfs2_holder *gh; in gfs2_dump_glock() local
2446 list_for_each_entry(gh, &gl->gl_holders, gh_list) in gfs2_dump_glock()
2447 dump_holder(seq, gh, fs_id_buf); in gfs2_dump_glock()