Lines matching references to gh (struct gfs2_holder) in fs/gfs2/glock.c
60 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
311 static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh) in may_grant() argument
315 if (gh != gh_head) { in may_grant()
324 gh->gh_state == LM_ST_EXCLUSIVE && in may_grant()
325 (gh->gh_flags & LM_FLAG_NODE_SCOPE)) in may_grant()
327 if ((gh->gh_state == LM_ST_EXCLUSIVE || in may_grant()
331 if (gl->gl_state == gh->gh_state) in may_grant()
333 if (gh->gh_flags & GL_EXACT) in may_grant()
336 if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED) in may_grant()
338 if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED) in may_grant()
341 if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY)) in may_grant()
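The may_grant() hits above are the grant-compatibility rules for a waiting holder. A condensed restatement of the state/flag checks visible in these lines, as a minimal sketch only: the real may_grant() additionally compares the request against the head holder of gl_holders and handles the LM_FLAG_NODE_SCOPE exclusive-sharing exception, both omitted here.

	/* Sketch: could a request for gh_state/gh_flags be satisfied while the
	 * glock is already in gl_state?  Head-of-queue handling is omitted;
	 * the real code also requires the head holder to ask for the same
	 * state before sharing under an EX glock. */
	static bool grant_compatible(unsigned int gl_state, unsigned int gh_state,
				     u16 gh_flags)
	{
		if (gl_state == gh_state)
			return true;		/* states match exactly */
		if (gh_flags & GL_EXACT)
			return false;		/* caller demands an exact match */
		if (gl_state == LM_ST_EXCLUSIVE &&
		    (gh_state == LM_ST_SHARED || gh_state == LM_ST_DEFERRED))
			return true;		/* EX is strong enough for SH/DF */
		if (gl_state != LM_ST_UNLOCKED && (gh_flags & LM_FLAG_ANY))
			return true;		/* any held state is acceptable */
		return false;
	}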
346 static void gfs2_holder_wake(struct gfs2_holder *gh) in gfs2_holder_wake() argument
348 clear_bit(HIF_WAIT, &gh->gh_iflags); in gfs2_holder_wake()
350 wake_up_bit(&gh->gh_iflags, HIF_WAIT); in gfs2_holder_wake()
351 if (gh->gh_flags & GL_ASYNC) { in gfs2_holder_wake()
352 struct gfs2_sbd *sdp = gh->gh_gl->gl_name.ln_sbd; in gfs2_holder_wake()
366 struct gfs2_holder *gh, *tmp; in do_error() local
368 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { in do_error()
369 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) in do_error()
372 gh->gh_error = -EIO; in do_error()
373 else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) in do_error()
374 gh->gh_error = GLR_TRYFAILED; in do_error()
377 list_del_init(&gh->gh_list); in do_error()
378 trace_gfs2_glock_queue(gh, 0); in do_error()
379 gfs2_holder_wake(gh); in do_error()
396 struct gfs2_holder *gh, *tmp; in do_promote() local
400 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { in do_promote()
401 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) in do_promote()
403 if (may_grant(gl, gh)) { in do_promote()
404 if (gh->gh_list.prev == &gl->gl_holders && in do_promote()
408 ret = glops->go_lock(gh); in do_promote()
413 gh->gh_error = ret; in do_promote()
414 list_del_init(&gh->gh_list); in do_promote()
415 trace_gfs2_glock_queue(gh, 0); in do_promote()
416 gfs2_holder_wake(gh); in do_promote()
419 set_bit(HIF_HOLDER, &gh->gh_iflags); in do_promote()
420 trace_gfs2_promote(gh, 1); in do_promote()
421 gfs2_holder_wake(gh); in do_promote()
424 set_bit(HIF_HOLDER, &gh->gh_iflags); in do_promote()
425 trace_gfs2_promote(gh, 0); in do_promote()
426 gfs2_holder_wake(gh); in do_promote()
429 if (gh->gh_list.prev == &gl->gl_holders) in do_promote()
444 struct gfs2_holder *gh; in find_first_waiter() local
446 list_for_each_entry(gh, &gl->gl_holders, gh_list) { in find_first_waiter()
447 if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) in find_first_waiter()
448 return gh; in find_first_waiter()
508 struct gfs2_holder *gh; in finish_xmote() local
515 gh = find_first_waiter(gl); in finish_xmote()
524 if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) { in finish_xmote()
527 if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0) in finish_xmote()
528 list_move_tail(&gh->gh_list, &gl->gl_holders); in finish_xmote()
529 gh = find_first_waiter(gl); in finish_xmote()
530 gl->gl_target = gh->gh_state; in finish_xmote()
535 (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) { in finish_xmote()
545 do_xmote(gl, gh, gl->gl_target); in finish_xmote()
550 do_xmote(gl, gh, LM_ST_UNLOCKED); in finish_xmote()
602 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target) in do_xmote() argument
608 unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0); in do_xmote()
612 gh && !(gh->gh_flags & LM_FLAG_NOEXP)) in do_xmote()
732 struct gfs2_holder *gh; in find_first_holder() local
735 gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list); in find_first_holder()
736 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) in find_first_holder()
737 return gh; in find_first_holder()
753 struct gfs2_holder *gh = NULL; in run_queue() local
778 gh = find_first_waiter(gl); in run_queue()
779 gl->gl_target = gh->gh_state; in run_queue()
780 if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) in run_queue()
783 do_xmote(gl, gh, gl->gl_target); in run_queue()
822 struct gfs2_holder gh; in gfs2_glock_poke() local
825 gfs2_holder_init(gl, LM_ST_SHARED, flags, &gh); in gfs2_glock_poke()
826 error = gfs2_glock_nq(&gh); in gfs2_glock_poke()
828 gfs2_glock_dq(&gh); in gfs2_glock_poke()
829 gfs2_holder_uninit(&gh); in gfs2_glock_poke()
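gfs2_glock_poke() above shows the canonical holder lifecycle. Written out as a minimal sketch, assuming the caller already has a referenced struct gfs2_glock *gl in hand:

	struct gfs2_holder gh;
	int error;

	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);	/* fill in the holder */
	error = gfs2_glock_nq(&gh);			/* enqueue; waits until granted */
	if (!error) {
		/* ... glock is held in LM_ST_SHARED here ... */
		gfs2_glock_dq(&gh);			/* drop the hold */
	}
	gfs2_holder_uninit(&gh);			/* release pid and glock references */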
1123 struct gfs2_holder *gh) in gfs2_holder_init() argument
1125 INIT_LIST_HEAD(&gh->gh_list); in gfs2_holder_init()
1126 gh->gh_gl = gl; in gfs2_holder_init()
1127 gh->gh_ip = _RET_IP_; in gfs2_holder_init()
1128 gh->gh_owner_pid = get_pid(task_pid(current)); in gfs2_holder_init()
1129 gh->gh_state = state; in gfs2_holder_init()
1130 gh->gh_flags = flags; in gfs2_holder_init()
1131 gh->gh_error = 0; in gfs2_holder_init()
1132 gh->gh_iflags = 0; in gfs2_holder_init()
1146 void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh) in gfs2_holder_reinit() argument
1148 gh->gh_state = state; in gfs2_holder_reinit()
1149 gh->gh_flags = flags; in gfs2_holder_reinit()
1150 gh->gh_iflags = 0; in gfs2_holder_reinit()
1151 gh->gh_ip = _RET_IP_; in gfs2_holder_reinit()
1152 put_pid(gh->gh_owner_pid); in gfs2_holder_reinit()
1153 gh->gh_owner_pid = get_pid(task_pid(current)); in gfs2_holder_reinit()
1162 void gfs2_holder_uninit(struct gfs2_holder *gh) in gfs2_holder_uninit() argument
1164 put_pid(gh->gh_owner_pid); in gfs2_holder_uninit()
1165 gfs2_glock_put(gh->gh_gl); in gfs2_holder_uninit()
1166 gfs2_holder_mark_uninitialized(gh); in gfs2_holder_uninit()
1167 gh->gh_ip = 0; in gfs2_holder_uninit()
1188 int gfs2_glock_wait(struct gfs2_holder *gh) in gfs2_glock_wait() argument
1193 wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE); in gfs2_glock_wait()
1194 gfs2_glock_update_hold_time(gh->gh_gl, start_time); in gfs2_glock_wait()
1195 return gh->gh_error; in gfs2_glock_wait()
1342 static inline void add_to_queue(struct gfs2_holder *gh) in add_to_queue() argument
1346 struct gfs2_glock *gl = gh->gh_gl; in add_to_queue()
1352 GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL); in add_to_queue()
1353 if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags)) in add_to_queue()
1356 if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) { in add_to_queue()
1358 try_futile = !may_grant(gl, gh); in add_to_queue()
1364 if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid && in add_to_queue()
1365 (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK))) in add_to_queue()
1370 gh->gh_error = GLR_TRYFAILED; in add_to_queue()
1371 gfs2_holder_wake(gh); in add_to_queue()
1376 if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt)) in add_to_queue()
1379 trace_gfs2_glock_queue(gh, 1); in add_to_queue()
1383 list_add_tail(&gh->gh_list, &gl->gl_holders); in add_to_queue()
1384 if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY)) in add_to_queue()
1388 list_add_tail(&gh->gh_list, insert_pt); in add_to_queue()
1390 gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list); in add_to_queue()
1391 if (!(gh->gh_flags & LM_FLAG_PRIORITY)) { in add_to_queue()
1404 fs_err(sdp, "new: %pSR\n", (void *)gh->gh_ip); in add_to_queue()
1405 fs_err(sdp, "pid: %d\n", pid_nr(gh->gh_owner_pid)); in add_to_queue()
1407 gh->gh_gl->gl_name.ln_type, gh->gh_state); in add_to_queue()
1421 int gfs2_glock_nq(struct gfs2_holder *gh) in gfs2_glock_nq() argument
1423 struct gfs2_glock *gl = gh->gh_gl; in gfs2_glock_nq()
1426 if (glock_blocked_by_withdraw(gl) && !(gh->gh_flags & LM_FLAG_NOEXP)) in gfs2_glock_nq()
1433 add_to_queue(gh); in gfs2_glock_nq()
1434 if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) && in gfs2_glock_nq()
1443 if (!(gh->gh_flags & GL_ASYNC)) in gfs2_glock_nq()
1444 error = gfs2_glock_wait(gh); in gfs2_glock_nq()
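As the add_to_queue() and do_error() hits show, holders queued with LM_FLAG_TRY or LM_FLAG_TRY_1CB get gh_error set to GLR_TRYFAILED instead of waiting for the lock. A rough sketch of a non-blocking attempt, again assuming a valid gl:

	struct gfs2_holder gh;
	int error;

	gfs2_holder_init(gl, LM_ST_EXCLUSIVE, LM_FLAG_TRY, &gh);
	error = gfs2_glock_nq(&gh);
	if (error == GLR_TRYFAILED) {
		/* contended: back off instead of blocking */
	} else if (!error) {
		/* ... work under the EX hold ... */
		gfs2_glock_dq(&gh);
	}
	gfs2_holder_uninit(&gh);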
1456 int gfs2_glock_poll(struct gfs2_holder *gh) in gfs2_glock_poll() argument
1458 return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1; in gfs2_glock_poll()
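gfs2_glock_poll() pairs with GL_ASYNC holders: with that flag, gfs2_glock_nq() returns without waiting, and the caller later polls or sleeps in gfs2_glock_wait() for HIF_WAIT to clear. A sketch of that pattern:

	struct gfs2_holder gh;
	int error;

	gfs2_holder_init(gl, LM_ST_SHARED, GL_ASYNC, &gh);
	error = gfs2_glock_nq(&gh);		/* returns immediately with GL_ASYNC */
	if (!error) {
		/* ... do other work; gfs2_glock_poll(&gh) reports completion ... */
		error = gfs2_glock_wait(&gh);	/* sleep until granted, read gh_error */
		if (!error) {
			/* ... glock held in LM_ST_SHARED ... */
			gfs2_glock_dq(&gh);
		}
	}
	gfs2_holder_uninit(&gh);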
1467 void gfs2_glock_dq(struct gfs2_holder *gh) in gfs2_glock_dq() argument
1469 struct gfs2_glock *gl = gh->gh_gl; in gfs2_glock_dq()
1484 gh->gh_gl != sdp->sd_jinode_gl) { in gfs2_glock_dq()
1492 if (gh->gh_flags & GL_NOCACHE) in gfs2_glock_dq()
1495 list_del_init(&gh->gh_list); in gfs2_glock_dq()
1496 clear_bit(HIF_HOLDER, &gh->gh_iflags); in gfs2_glock_dq()
1505 trace_gfs2_glock_queue(gh, 0); in gfs2_glock_dq()
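The GL_NOCACHE test in gfs2_glock_dq() means a holder initialized with that flag asks for the glock to be demoted rather than cached once the hold is dropped, e.g. (sketch):

	/* Request that the glock not be retained after this hold is dequeued. */
	gfs2_holder_init(gl, LM_ST_EXCLUSIVE, GL_NOCACHE, &gh);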
1517 void gfs2_glock_dq_wait(struct gfs2_holder *gh) in gfs2_glock_dq_wait() argument
1519 struct gfs2_glock *gl = gh->gh_gl; in gfs2_glock_dq_wait()
1520 gfs2_glock_dq(gh); in gfs2_glock_dq_wait()
1531 void gfs2_glock_dq_uninit(struct gfs2_holder *gh) in gfs2_glock_dq_uninit() argument
1533 gfs2_glock_dq(gh); in gfs2_glock_dq_uninit()
1534 gfs2_holder_uninit(gh); in gfs2_glock_dq_uninit()
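gfs2_glock_dq_uninit() is the dequeue-side counterpart of gfs2_glock_nq_init() (used by gfs2_glock_nq_num() below); together they collapse the four-call lifecycle into two. A minimal sketch:

	struct gfs2_holder gh;
	int error;

	error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);	/* init + enqueue */
	if (!error) {
		/* ... glock held ... */
		gfs2_glock_dq_uninit(&gh);			/* dequeue + uninit */
	}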
1551 unsigned int state, u16 flags, struct gfs2_holder *gh) in gfs2_glock_nq_num() argument
1558 error = gfs2_glock_nq_init(gl, state, flags, gh); in gfs2_glock_nq_num()
1708 const struct gfs2_holder *gh; in gfs2_should_freeze() local
1715 list_for_each_entry(gh, &gl->gl_holders, gh_list) { in gfs2_should_freeze()
1716 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) in gfs2_should_freeze()
1718 if (LM_FLAG_NOEXP & gh->gh_flags) in gfs2_should_freeze()
2091 static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh, in dump_holder() argument
2098 if (gh->gh_owner_pid) in dump_holder()
2099 gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID); in dump_holder()
2101 fs_id_buf, state2str(gh->gh_state), in dump_holder()
2102 hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags), in dump_holder()
2103 gh->gh_error, in dump_holder()
2104 gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1, in dump_holder()
2106 (void *)gh->gh_ip); in dump_holder()
2173 const struct gfs2_holder *gh; in gfs2_dump_glock() local
2203 list_for_each_entry(gh, &gl->gl_holders, gh_list) in gfs2_dump_glock()
2204 dump_holder(seq, gh, fs_id_buf); in gfs2_dump_glock()