Lines matching refs:gl (cross-reference hits for the glock pointer gl in the GFS2 glock operations callbacks, fs/gfs2/glops.c). Each entry shows the source line number, the matching line, the enclosing function, and whether gl is an argument or a local there.

34 static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)  in gfs2_ail_error()  argument
36 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_ail_error()
44 gl->gl_name.ln_type, gl->gl_name.ln_number, in gfs2_ail_error()
45 gfs2_glock2aspace(gl)); in gfs2_ail_error()
59 static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync, in __gfs2_ail_flush() argument
62 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in __gfs2_ail_flush()
63 struct list_head *head = &gl->gl_ail_list; in __gfs2_ail_flush()
77 gfs2_ail_error(gl, bh); in __gfs2_ail_flush()
82 GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count)); in __gfs2_ail_flush()
88 static int gfs2_ail_empty_gl(struct gfs2_glock *gl) in gfs2_ail_empty_gl() argument
90 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_ail_empty_gl()
95 revokes = atomic_read(&gl->gl_ail_count); in gfs2_ail_empty_gl()
131 __gfs2_ail_flush(gl, 0, revokes); in gfs2_ail_empty_gl()
141 void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) in gfs2_ail_flush() argument
143 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_ail_flush()
144 unsigned int revokes = atomic_read(&gl->gl_ail_count); in gfs2_ail_flush()
153 __gfs2_ail_flush(gl, fsync, revokes); in gfs2_ail_flush()
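The hits above, from gfs2_ail_error(), __gfs2_ail_flush(), gfs2_ail_empty_gl() and gfs2_ail_flush(), all follow one shape: snapshot atomic_read(&gl->gl_ail_count) to size the number of revokes, walk gl->gl_ail_list, report any buffer found in a bad state via gfs2_ail_error(), and finish with GLOCK_BUG_ON() asserting that the count has drained unless this is an fsync-driven flush. A minimal userspace sketch of that shape follows; every name in it (ail_entry, lock_ail, ail_flush and so on) is invented for the example and is not GFS2 API.

/* Minimal userspace sketch of the AIL flush shape above: snapshot the
 * pending count, walk the list, report entries in a bad state, and assert
 * that the count drained.  All names here are hypothetical, not GFS2 API. */
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct ail_entry {
        struct ail_entry *next;
        unsigned long block;
        bool in_error;                  /* stands in for a buffer error bit */
};

struct lock_ail {
        struct ail_entry *head;         /* analogue of gl->gl_ail_list */
        atomic_int count;               /* analogue of gl->gl_ail_count */
};

static void ail_error(const struct ail_entry *e)
{
        fprintf(stderr, "AIL entry for block %lu is in an error state\n",
                e->block);
}

static void ail_flush(struct lock_ail *ail, bool fsync)
{
        int pending = atomic_load(&ail->count);        /* "revokes" snapshot */
        struct ail_entry *e, *next;

        if (!pending)
                return;

        for (e = ail->head; e; e = next) {
                next = e->next;
                if (e->in_error)
                        ail_error(e);          /* report but keep flushing */
                atomic_fetch_sub(&ail->count, 1);
                free(e);
        }
        ail->head = NULL;

        /* mirrors GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count)) */
        assert(fsync || atomic_load(&ail->count) == 0);
}

In the kernel the snapshot also feeds the journal revoke accounting before the walk; the sketch keeps only the counting and the final assertion.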
165 static int gfs2_rgrp_metasync(struct gfs2_glock *gl) in gfs2_rgrp_metasync() argument
167 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_rgrp_metasync()
169 struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl); in gfs2_rgrp_metasync()
193 static int rgrp_go_sync(struct gfs2_glock *gl) in rgrp_go_sync() argument
195 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in rgrp_go_sync()
196 struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl); in rgrp_go_sync()
199 if (!rgd || !test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) in rgrp_go_sync()
201 GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE); in rgrp_go_sync()
203 gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL | in rgrp_go_sync()
205 error = gfs2_rgrp_metasync(gl); in rgrp_go_sync()
207 error = gfs2_ail_empty_gl(gl); in rgrp_go_sync()
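rgrp_go_sync() shows the write-back ordering used when a dirty resource-group glock is about to be released: test_and_clear_bit(GLF_DIRTY) decides whether there is anything to do, GLOCK_BUG_ON() checks the lock is still held in LM_ST_EXCLUSIVE, and then the journal is flushed, the metadata is synced and the AIL is drained, with any error propagated to the caller. The sketch below keeps that ordering with stub helpers standing in for gfs2_log_flush(), gfs2_rgrp_metasync() and gfs2_ail_empty_gl(); the stubs and struct lock_state are invented for the example.

/* Sketch of the rgrp_go_sync() ordering: test-and-clear a DIRTY flag, then
 * journal flush -> metadata sync -> AIL drain, keeping the first error.
 * The three helpers are stubs invented for the example. */
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

#define FLAG_DIRTY      (1u << 0)

struct lock_state {
        atomic_uint flags;
        bool held_exclusive;
};

static void log_flush(struct lock_state *ls) { (void)ls; }
static int metadata_sync(struct lock_state *ls) { (void)ls; return 0; }
static int ail_empty(struct lock_state *ls) { (void)ls; return 0; }

static bool test_and_clear_dirty(struct lock_state *ls)
{
        unsigned int old = atomic_fetch_and(&ls->flags, ~FLAG_DIRTY);

        return old & FLAG_DIRTY;
}

static int lock_go_sync(struct lock_state *ls)
{
        int error;

        if (!test_and_clear_dirty(ls))
                return 0;               /* nothing to write back */
        assert(ls->held_exclusive);     /* mirrors the GLOCK_BUG_ON */

        log_flush(ls);                  /* journal first ... */
        error = metadata_sync(ls);      /* ... then the metadata pages */
        if (!error)
                error = ail_empty(ls);  /* ... then drain the AIL */
        return error;
}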
222 static void rgrp_go_inval(struct gfs2_glock *gl, int flags) in rgrp_go_inval() argument
224 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in rgrp_go_inval()
226 struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl); in rgrp_go_inval()
239 static void gfs2_rgrp_go_dump(struct seq_file *seq, const struct gfs2_glock *gl, in gfs2_rgrp_go_dump() argument
242 struct gfs2_rgrpd *rgd = gl->gl_object; in gfs2_rgrp_go_dump()
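rgrp_go_inval() throws away the cached resource-group state once the lock is invalidated, and gfs2_rgrp_go_dump() formats whatever gl->gl_object currently points at into a seq_file for debugging, which has to tolerate a lock with no object attached. A userspace sketch of such a dump hook, with struct rgrp, struct dump_lock and the field names invented for the example:

/* Userspace sketch of a go_dump-style hook: format the object behind the
 * lock into a caller-supplied buffer, coping with a NULL back-pointer. */
#include <stddef.h>
#include <stdio.h>

struct rgrp {
        unsigned long long addr;        /* first block of the group */
        unsigned int free;              /* free blocks */
};

struct dump_lock {
        struct rgrp *object;            /* analogue of gl->gl_object */
};

static void rgrp_dump(char *buf, size_t len, const struct dump_lock *gl)
{
        const struct rgrp *rgd = gl->object;

        if (!rgd) {                     /* lock may have no object attached */
                snprintf(buf, len, " R: (none)\n");
                return;
        }
        snprintf(buf, len, " R: n:%llu f:%u\n", rgd->addr, rgd->free);
}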
248 static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl) in gfs2_glock2inode() argument
252 spin_lock(&gl->gl_lockref.lock); in gfs2_glock2inode()
253 ip = gl->gl_object; in gfs2_glock2inode()
256 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock2inode()
260 struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl) in gfs2_glock2rgrp() argument
264 spin_lock(&gl->gl_lockref.lock); in gfs2_glock2rgrp()
265 rgd = gl->gl_object; in gfs2_glock2rgrp()
266 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock2rgrp()
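gfs2_glock2inode() and gfs2_glock2rgrp() read the gl->gl_object back-pointer only while holding gl->gl_lockref.lock, so the object cannot be detached mid-read and a NULL result is a valid answer rather than a race. A userspace analogue of that accessor, with pthread_mutex_t standing in for the spinlock and all struct names invented for the example:

/* Userspace analogue of gfs2_glock2rgrp(): the back-pointer from the lock to
 * its owning object is only read under the lock's own spinlock analogue, so
 * a reader never sees a half-torn-down pointer. */
#include <pthread.h>

struct rgrp;                            /* the object the lock protects */

struct glock {
        pthread_mutex_t lockref_lock;   /* analogue of gl->gl_lockref.lock */
        struct rgrp *object;            /* analogue of gl->gl_object */
};

static struct rgrp *glock2rgrp(struct glock *gl)
{
        struct rgrp *rgd;

        pthread_mutex_lock(&gl->lockref_lock);
        rgd = gl->object;               /* may legitimately be NULL */
        pthread_mutex_unlock(&gl->lockref_lock);
        return rgd;
}

Callers of the real helpers still have to cope with a NULL return, exactly as the sketch's caller would.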
285 int gfs2_inode_metasync(struct gfs2_glock *gl) in gfs2_inode_metasync() argument
287 struct address_space *metamapping = gfs2_glock2aspace(gl); in gfs2_inode_metasync()
293 gfs2_io_error(gl->gl_name.ln_sbd); in gfs2_inode_metasync()
303 static int inode_go_sync(struct gfs2_glock *gl) in inode_go_sync() argument
305 struct gfs2_inode *ip = gfs2_glock2inode(gl); in inode_go_sync()
307 struct address_space *metamapping = gfs2_glock2aspace(gl); in inode_go_sync()
315 if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) in inode_go_sync()
318 GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE); in inode_go_sync()
320 gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL | in inode_go_sync()
329 ret = gfs2_inode_metasync(gl); in inode_go_sync()
332 ret = gfs2_ail_empty_gl(gl); in inode_go_sync()
340 clear_bit(GLF_DIRTY, &gl->gl_flags); in inode_go_sync()
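gfs2_inode_metasync() writes back the glock's metadata address space (obtained via gfs2_glock2aspace()) and reports an I/O error through gfs2_io_error() if the wait fails; inode_go_sync() builds on it for a dirty inode glock, flushing the log first and folding the results of the metadata sync and the AIL drain into one return value before clearing GLF_DIRTY. A userspace analogue of that error folding, using fsync() on two file descriptors in place of the filemap_fdatawrite()/filemap_fdatawait() pairs; the descriptors and helper names are invented for the example:

/* Analogue of the inode_go_sync() error merging: sync the data first, then
 * the metadata, and return the first error encountered. */
#include <errno.h>
#include <unistd.h>

static int sync_fd(int fd)
{
        /* fsync() collapses "start writeback" and "wait for it". */
        return fsync(fd) == 0 ? 0 : -errno;
}

static int inode_sync_like(int data_fd, int meta_fd)
{
        int error, ret;

        error = sync_fd(data_fd);       /* data pages first */
        ret = sync_fd(meta_fd);         /* then the metadata mapping */
        if (!error)
                error = ret;            /* first failure wins */
        return error;
}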
358 static void inode_go_inval(struct gfs2_glock *gl, int flags) in inode_go_inval() argument
360 struct gfs2_inode *ip = gfs2_glock2inode(gl); in inode_go_inval()
363 struct address_space *mapping = gfs2_glock2aspace(gl); in inode_go_inval()
366 set_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags); in inode_go_inval()
373 if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) { in inode_go_inval()
374 gfs2_log_flush(gl->gl_name.ln_sbd, NULL, in inode_go_inval()
377 gl->gl_name.ln_sbd->sd_rindex_uptodate = 0; in inode_go_inval()
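inode_go_inval() handles the moment when the cached contents protected by the lock can no longer be trusted: it flags the lock with GLF_INSTANTIATE_NEEDED, and when the inode is the resource-group index it also flushes the log and clears sd_rindex_uptodate so the rindex is read again on next use. The shape is "drop the cached copy, then mark it stale so the next holder rebuilds it"; a sketch with invented names:

/* Sketch of an invalidate callback: drop the cached copy under a lock and
 * mark it stale so the next holder re-reads it.  All names are hypothetical. */
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct cached_index {
        pthread_mutex_t lock;
        void *data;                     /* cached copy, owned by the cache */
        bool uptodate;                  /* analogue of sd_rindex_uptodate */
};

static void index_invalidate(struct cached_index *ci)
{
        pthread_mutex_lock(&ci->lock);
        free(ci->data);                 /* throw the cached copy away */
        ci->data = NULL;
        ci->uptodate = false;           /* force a re-read on next use */
        pthread_mutex_unlock(&ci->lock);
}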
392 static int inode_go_demote_ok(const struct gfs2_glock *gl) in inode_go_demote_ok() argument
394 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in inode_go_demote_ok()
396 if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object) in inode_go_demote_ok()
501 static int inode_go_instantiate(struct gfs2_glock *gl) in inode_go_instantiate() argument
503 struct gfs2_inode *ip = gl->gl_object; in inode_go_instantiate()
513 struct gfs2_glock *gl = gh->gh_gl; in inode_go_held() local
514 struct gfs2_inode *ip = gl->gl_object; in inode_go_held()
524 (gl->gl_state == LM_ST_EXCLUSIVE) && in inode_go_held()
539 static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl, in inode_go_dump() argument
542 struct gfs2_inode *ip = gl->gl_object; in inode_go_dump()
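inode_go_demote_ok() refuses to demote the glocks guarding sd_jindex and sd_rindex, inode_go_instantiate() reads in the inode attached to gl->gl_object the first time the lock needs it, inode_go_held() only does extra work when the lock is held in LM_ST_EXCLUSIVE, and inode_go_dump() prints the attached inode for debugging. The instantiate hook is a lazy, populate-once pattern; a sketch with load_object(), struct ilock and struct inode_like invented for the example:

/* Sketch of a lazy instantiate hook: if the lock's object has not been
 * populated yet, build it the first time a holder needs it. */
struct inode_like {
        unsigned long long no_addr;     /* disk location to read from */
        int loaded;
};

struct ilock {
        struct inode_like *object;      /* analogue of gl->gl_object */
};

static int load_object(struct inode_like *ip)
{
        ip->loaded = 1;                 /* stands in for reading the dinode */
        return 0;
}

static int lock_instantiate(struct ilock *gl)
{
        struct inode_like *ip = gl->object;

        if (!ip)                        /* nothing attached: read it in later */
                return 0;
        if (ip->loaded)                 /* already instantiated earlier */
                return 0;
        return load_object(ip);         /* populate from disk exactly once */
}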
564 static void freeze_go_callback(struct gfs2_glock *gl, bool remote) in freeze_go_callback() argument
566 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in freeze_go_callback()
570 (gl->gl_state != LM_ST_SHARED && in freeze_go_callback()
571 gl->gl_state != LM_ST_UNLOCKED) || in freeze_go_callback()
572 gl->gl_demote_state != LM_ST_UNLOCKED) in freeze_go_callback()
593 static int freeze_go_xmote_bh(struct gfs2_glock *gl) in freeze_go_xmote_bh() argument
595 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in freeze_go_xmote_bh()
623 static int freeze_go_demote_ok(const struct gfs2_glock *gl) in freeze_go_demote_ok() argument
635 static void iopen_go_callback(struct gfs2_glock *gl, bool remote) in iopen_go_callback() argument
637 struct gfs2_inode *ip = gl->gl_object; in iopen_go_callback()
638 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in iopen_go_callback()
644 if (gl->gl_demote_state == LM_ST_UNLOCKED && in iopen_go_callback()
645 gl->gl_state == LM_ST_SHARED && ip) { in iopen_go_callback()
646 gl->gl_lockref.count++; in iopen_go_callback()
647 if (!gfs2_queue_try_to_evict(gl)) in iopen_go_callback()
648 gl->gl_lockref.count--; in iopen_go_callback()
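freeze_go_callback() and iopen_go_callback() both guard against acting on a remote callback in the wrong state. The iopen case additionally shows a reference-counting idiom: when the lock is SHARED, a demote to UNLOCKED has been requested and an inode is still attached, gl->gl_lockref.count is bumped for the eviction work that gfs2_queue_try_to_evict() may queue, and the reference is dropped again if queueing fails. A userspace sketch of that take-then-maybe-give-back dance; queue_evict_work(), struct iopen_lock and the mode constants are invented for the example, and the locking that protects the count in the kernel is omitted:

/* Sketch of the iopen_go_callback() refcount dance: bump the reference that
 * the deferred work will own, and drop it again if the work cannot be
 * queued. */
#include <stdbool.h>

enum { MODE_UNLOCKED, MODE_SHARED, MODE_EXCLUSIVE };

struct iopen_lock {
        int state;                      /* current lock mode */
        int demote_state;               /* mode a remote node asked for */
        void *object;                   /* attached inode, may be NULL */
        unsigned long refcount;         /* analogue of gl_lockref.count */
};

static bool queue_evict_work(struct iopen_lock *gl)
{
        (void)gl;
        return true;                    /* pretend the work was queued */
}

static void iopen_callback(struct iopen_lock *gl, bool remote)
{
        if (!remote)
                return;
        if (gl->demote_state == MODE_UNLOCKED &&
            gl->state == MODE_SHARED && gl->object) {
                gl->refcount++;         /* reference owned by the work */
                if (!queue_evict_work(gl))
                        gl->refcount--; /* queueing failed: give it back */
        }
}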
660 static void inode_go_free(struct gfs2_glock *gl) in inode_go_free() argument
664 if (!test_bit(GLF_FREEING, &gl->gl_flags)) in inode_go_free()
666 clear_bit_unlock(GLF_FREEING, &gl->gl_flags); in inode_go_free()
667 wake_up_bit(&gl->gl_flags, GLF_FREEING); in inode_go_free()
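inode_go_free() is the end-of-life handshake: if GLF_FREEING is set, it clears the bit with clear_bit_unlock() and wakes whoever is sleeping on that flag via wake_up_bit(). A userspace analogue using a mutex and condition variable in place of the bit-wait machinery; struct free_sync and both function names are invented for the example:

/* Analogue of the clear_bit_unlock() + wake_up_bit() handshake: the freer
 * clears FREEING and wakes the thread waiting for it. */
#include <pthread.h>
#include <stdbool.h>

struct free_sync {
        pthread_mutex_t lock;
        pthread_cond_t done;
        bool freeing;                   /* analogue of GLF_FREEING */
};

static void lock_go_free(struct free_sync *fs)
{
        pthread_mutex_lock(&fs->lock);
        if (fs->freeing) {              /* only notify if someone is waiting */
                fs->freeing = false;    /* release the "FREEING" state */
                pthread_cond_broadcast(&fs->done);
        }
        pthread_mutex_unlock(&fs->lock);
}

static void wait_for_free(struct free_sync *fs)
{
        pthread_mutex_lock(&fs->lock);
        while (fs->freeing)             /* sleep until go_free has run */
                pthread_cond_wait(&fs->done, &fs->lock);
        pthread_mutex_unlock(&fs->lock);
}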
676 static void nondisk_go_callback(struct gfs2_glock *gl, bool remote) in nondisk_go_callback() argument
678 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in nondisk_go_callback()
682 if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK) in nondisk_go_callback()
688 clear_bit(GLF_DEMOTE, &gl->gl_flags); in nondisk_go_callback()
689 clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags); in nondisk_go_callback()
699 if (gl->gl_demote_state != LM_ST_UNLOCKED) in nondisk_go_callback()
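nondisk_go_callback() deals with remote callbacks on the GFS2_LIVE_LOCK lock only: anything else is ignored, the GLF_DEMOTE and GLF_PENDING_DEMOTE bits are cleared because this lock should never actually be demoted, and a remote request to go to LM_ST_UNLOCKED is treated as another node signalling a withdraw. None of the callbacks in this listing is called directly; GFS2 gathers them into per-lock-type struct gfs2_glock_operations tables that the core glock code calls through. A generic sketch of that operations-table arrangement, with struct lock_ops and all instance names invented for the example:

/* Sketch of the operations-table idea behind these callbacks: each lock type
 * supplies the hooks it needs, and the core code calls through the table. */
struct lock;                            /* opaque to the example */

struct lock_ops {
        int  (*go_sync)(struct lock *gl);                 /* write back dirty state */
        void (*go_inval)(struct lock *gl, int flags);     /* drop cached state */
        void (*go_callback)(struct lock *gl, int remote); /* remote demote request */
};

static int example_sync(struct lock *gl) { (void)gl; return 0; }
static void example_inval(struct lock *gl, int flags) { (void)gl; (void)flags; }

static const struct lock_ops example_ops = {
        .go_sync = example_sync,
        .go_inval = example_inval,
        /* hooks a lock type does not need are simply left NULL */
};

/* The caller checks for NULL before calling through the table. */
static int demote_lock(struct lock *gl, const struct lock_ops *ops)
{
        int error = 0;

        if (ops->go_sync)
                error = ops->go_sync(gl);
        if (!error && ops->go_inval)
                ops->go_inval(gl, 0);
        return error;
}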