Lines matching refs: gl in fs/gfs2/glops.c (GFS2 glock operations)
32 static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh) in gfs2_ail_error() argument
34 fs_err(gl->gl_name.ln_sbd, in gfs2_ail_error()
39 fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n", in gfs2_ail_error()
40 gl->gl_name.ln_type, gl->gl_name.ln_number, in gfs2_ail_error()
41 gfs2_glock2aspace(gl)); in gfs2_ail_error()
42 gfs2_lm_withdraw(gl->gl_name.ln_sbd, "AIL error\n"); in gfs2_ail_error()
53 static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync, in __gfs2_ail_flush() argument
56 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in __gfs2_ail_flush()
57 struct list_head *head = &gl->gl_ail_list; in __gfs2_ail_flush()
71 gfs2_ail_error(gl, bh); in __gfs2_ail_flush()
76 GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count)); in __gfs2_ail_flush()
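Taken together, gfs2_ail_error() and __gfs2_ail_flush() show the AIL (active items list) drain pattern: walk gl->gl_ail_list, report and withdraw the filesystem via gfs2_lm_withdraw() if a buffer is in an inconsistent state, and assert with GLOCK_BUG_ON() that gl->gl_ail_count has reached zero unless the caller is an fsync. A minimal userspace sketch of that drain-and-assert shape; fake_glock, ail_item, and ail_flush() are hypothetical names, not GFS2 API:

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct ail_item { struct ail_item *next; };

struct fake_glock {
        struct ail_item *ail_list;   /* items still pinned in the AIL */
        atomic_int ail_count;        /* mirrors gl->gl_ail_count */
};

/* Drain the list; unless this is an fsync, the count must reach zero,
 * mirroring GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count)). */
static void ail_flush(struct fake_glock *gl, bool fsync)
{
        while (gl->ail_list) {
                struct ail_item *it = gl->ail_list;

                gl->ail_list = it->next;
                atomic_fetch_sub(&gl->ail_count, 1);
                free(it);
        }
        assert(fsync || atomic_load(&gl->ail_count) == 0);
}

int main(void)
{
        struct fake_glock gl = { .ail_list = NULL };

        atomic_init(&gl.ail_count, 0);
        for (int i = 0; i < 2; i++) {        /* pin two items */
                struct ail_item *it = malloc(sizeof(*it));

                it->next = gl.ail_list;
                gl.ail_list = it;
                atomic_fetch_add(&gl.ail_count, 1);
        }
        ail_flush(&gl, false);
        printf("ail_count after flush: %d\n", atomic_load(&gl.ail_count));
        return 0;
}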
82 static void gfs2_ail_empty_gl(struct gfs2_glock *gl) in gfs2_ail_empty_gl() argument
84 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_ail_empty_gl()
90 tr.tr_revokes = atomic_read(&gl->gl_ail_count); in gfs2_ail_empty_gl()
105 __gfs2_ail_flush(gl, 0, tr.tr_revokes); in gfs2_ail_empty_gl()
112 void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) in gfs2_ail_flush() argument
114 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_ail_flush()
115 unsigned int revokes = atomic_read(&gl->gl_ail_count); in gfs2_ail_flush()
128 __gfs2_ail_flush(gl, fsync, max_revokes); in gfs2_ail_flush()
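Both gfs2_ail_empty_gl() and gfs2_ail_flush() size their work from one atomic_read() snapshot of gl->gl_ail_count: it becomes tr.tr_revokes (the revoke records the struct gfs2_trans must hold) in the first, and feeds the revoke budget passed to __gfs2_ail_flush() in the second. A hedged sketch of that snapshot-then-reserve step, reusing fake_glock and ail_flush() from the sketch above; reserve_log_space() is a stand-in, not a GFS2 function:

/* Stand-in for the journal reservation GFS2 does via its transaction. */
static void reserve_log_space(int revokes)
{
        (void)revokes;               /* would reserve journal space here */
}

static void ail_empty(struct fake_glock *gl)
{
        /* One snapshot sizes the whole transaction, like
         * tr.tr_revokes = atomic_read(&gl->gl_ail_count). */
        int revokes = atomic_load(&gl->ail_count);

        if (revokes == 0)
                return;              /* nothing pinned, no transaction */
        reserve_log_space(revokes);
        ail_flush(gl, false);        /* drain, as in the sketch above */
}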
143 static void rgrp_go_sync(struct gfs2_glock *gl) in rgrp_go_sync() argument
145 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in rgrp_go_sync()
150 spin_lock(&gl->gl_lockref.lock); in rgrp_go_sync()
151 rgd = gl->gl_object; in rgrp_go_sync()
154 spin_unlock(&gl->gl_lockref.lock); in rgrp_go_sync()
156 if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) in rgrp_go_sync()
158 GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE); in rgrp_go_sync()
160 gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL | in rgrp_go_sync()
162 filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end); in rgrp_go_sync()
163 error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end); in rgrp_go_sync()
165 gfs2_ail_empty_gl(gl); in rgrp_go_sync()
167 spin_lock(&gl->gl_lockref.lock); in rgrp_go_sync()
168 rgd = gl->gl_object; in rgrp_go_sync()
171 spin_unlock(&gl->gl_lockref.lock); in rgrp_go_sync()
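rgrp_go_sync() is the canonical ordering for the go_sync callbacks in this file: do nothing unless test_and_clear_bit(GLF_DIRTY) fires, assert the glock is held in LM_ST_EXCLUSIVE (only an exclusive holder can have dirtied it), flush the journal before the data it describes, write and wait on the glock's gl_vm.start..gl_vm.end range, drain the AIL, and only then update the object behind gl->gl_object under gl_lockref.lock. A compilable userspace sketch of that ordering; every type and helper below is a hypothetical stub:

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum gl_state { ST_SHARED, ST_EXCLUSIVE };

struct fake_rgrp { bool clean; };

struct rgrp_glock {
        bool dirty;                  /* stands in for GLF_DIRTY */
        enum gl_state state;         /* stands in for gl->gl_state */
        pthread_mutex_t obj_lock;    /* stands in for gl_lockref.lock */
        struct fake_rgrp *object;    /* stands in for gl->gl_object */
};

static void journal_flush(void)   { puts("journal flushed"); }
static void writeback_range(void) { puts("data range written and waited on"); }
static void ail_drain(void)       { puts("AIL drained"); }

static void rgrp_sync(struct rgrp_glock *gl)
{
        if (!gl->dirty)              /* test_and_clear_bit(GLF_DIRTY) gates it */
                return;
        gl->dirty = false;
        assert(gl->state == ST_EXCLUSIVE);  /* only an EX holder dirties */

        journal_flush();             /* journal first ... */
        writeback_range();           /* ... then the protected data range */
        ail_drain();                 /* ... then the pinned buffers */

        pthread_mutex_lock(&gl->obj_lock);  /* gl_object only under the lock */
        if (gl->object)
                gl->object->clean = true;
        pthread_mutex_unlock(&gl->obj_lock);
}

int main(void)
{
        struct fake_rgrp rgd = { .clean = false };
        struct rgrp_glock gl = { .dirty = true, .state = ST_EXCLUSIVE,
                                 .object = &rgd };

        pthread_mutex_init(&gl.obj_lock, NULL);
        rgrp_sync(&gl);
        printf("rgrp clean: %d\n", rgd.clean);
        pthread_mutex_destroy(&gl.obj_lock);
        return 0;
}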
184 static void rgrp_go_inval(struct gfs2_glock *gl, int flags) in rgrp_go_inval() argument
186 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in rgrp_go_inval()
188 struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl); in rgrp_go_inval()
194 gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count)); in rgrp_go_inval()
195 truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end); in rgrp_go_inval()
201 static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl) in gfs2_glock2inode() argument
205 spin_lock(&gl->gl_lockref.lock); in gfs2_glock2inode()
206 ip = gl->gl_object; in gfs2_glock2inode()
209 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock2inode()
213 struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl) in gfs2_glock2rgrp() argument
217 spin_lock(&gl->gl_lockref.lock); in gfs2_glock2rgrp()
218 rgd = gl->gl_object; in gfs2_glock2rgrp()
219 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock2rgrp()
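gfs2_glock2inode() and gfs2_glock2rgrp() are the same accessor twice: the gl->gl_object back-pointer can be cleared concurrently, so it is copied out only under gl->gl_lockref.lock and never dereferenced bare. A self-contained analog using a pthread spinlock; glock_like and glock2object() are illustrative names, not the GFS2 API:

#include <pthread.h>
#include <stdio.h>

struct glock_like {
        pthread_spinlock_t lock;   /* plays the role of gl_lockref.lock */
        void *object;              /* plays the role of gl->gl_object */
};

/* Copy the back-pointer out under the lock; the caller gets a value
 * that was valid at the instant of the read, never a torn pointer. */
static void *glock2object(struct glock_like *gl)
{
        void *obj;

        pthread_spin_lock(&gl->lock);
        obj = gl->object;
        pthread_spin_unlock(&gl->lock);
        return obj;
}

int main(void)
{
        int rgrp = 42;
        struct glock_like gl;

        pthread_spin_init(&gl.lock, PTHREAD_PROCESS_PRIVATE);
        gl.object = &rgrp;
        printf("object = %d\n", *(int *)glock2object(&gl));
        pthread_spin_destroy(&gl.lock);
        return 0;
}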
239 static void inode_go_sync(struct gfs2_glock *gl) in inode_go_sync() argument
241 struct gfs2_inode *ip = gfs2_glock2inode(gl); in inode_go_sync()
243 struct address_space *metamapping = gfs2_glock2aspace(gl); in inode_go_sync()
251 if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) in inode_go_sync()
254 GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE); in inode_go_sync()
256 gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL | in inode_go_sync()
267 gfs2_ail_empty_gl(gl); in inode_go_sync()
273 clear_bit(GLF_DIRTY, &gl->gl_flags); in inode_go_sync()
290 static void inode_go_inval(struct gfs2_glock *gl, int flags) in inode_go_inval() argument
292 struct gfs2_inode *ip = gfs2_glock2inode(gl); in inode_go_inval()
294 gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count)); in inode_go_inval()
297 struct address_space *mapping = gfs2_glock2aspace(gl); in inode_go_inval()
307 if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) { in inode_go_inval()
308 gfs2_log_flush(gl->gl_name.ln_sbd, NULL, in inode_go_inval()
311 gl->gl_name.ln_sbd->sd_rindex_uptodate = 0; in inode_go_inval()
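inode_go_inval() pairs page invalidation with a staleness flag: when the invalidated inode is the resource index (sd_rindex), it also flushes the log and clears sd_rindex_uptodate so the in-core rgrp list is rebuilt on next use. A minimal sketch of that invalidate-and-mark-stale shape, with hypothetical names throughout:

#include <stdbool.h>
#include <stdio.h>

struct fs_like {
        void *rindex_inode;        /* stands in for sdp->sd_rindex */
        bool rindex_uptodate;      /* stands in for sd_rindex_uptodate */
};

static void drop_cached_pages(void *inode)
{
        (void)inode;               /* would truncate the page cache here */
}

static void inode_inval(struct fs_like *fs, void *inode)
{
        drop_cached_pages(inode);
        /* Invalidating the index inode makes the in-core rgrp list
         * stale, so force it to be rebuilt on next use. */
        if (inode == fs->rindex_inode)
                fs->rindex_uptodate = false;
}

int main(void)
{
        int rindex;
        struct fs_like fs = { .rindex_inode = &rindex,
                              .rindex_uptodate = true };

        inode_inval(&fs, &rindex);
        printf("rindex uptodate: %d\n", fs.rindex_uptodate);
        return 0;
}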
326 static int inode_go_demote_ok(const struct gfs2_glock *gl) in inode_go_demote_ok() argument
328 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in inode_go_demote_ok()
330 if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object) in inode_go_demote_ok()
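inode_go_demote_ok() compares gl->gl_object against the superblock's sd_jindex and sd_rindex: glocks protecting the journal index and resource index inodes are needed constantly, so they refuse demotion. A hedged sketch of the exclusion predicate; sb_like and gl_like are invented types:

#include <stdbool.h>
#include <stdio.h>

struct sb_like { void *jindex, *rindex; };   /* sd_jindex / sd_rindex */
struct gl_like { struct sb_like *sbd; void *object; };

/* Refuse demotion for glocks whose object is one of the always-hot
 * index inodes, mirroring inode_go_demote_ok()'s early-out. */
static bool demote_ok(const struct gl_like *gl)
{
        const struct sb_like *sdp = gl->sbd;

        return gl->object != sdp->jindex && gl->object != sdp->rindex;
}

int main(void)
{
        int jindex, other;
        struct sb_like sb = { .jindex = &jindex, .rindex = NULL };
        struct gl_like jgl = { .sbd = &sb, .object = &jindex };
        struct gl_like ogl = { .sbd = &sb, .object = &other };

        printf("jindex glock demote_ok: %d\n", demote_ok(&jgl));  /* 0 */
        printf("other glock demote_ok:  %d\n", demote_ok(&ogl));  /* 1 */
        return 0;
}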
429 struct gfs2_glock *gl = gh->gh_gl; in inode_go_lock() local
430 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in inode_go_lock()
431 struct gfs2_inode *ip = gl->gl_object; in inode_go_lock()
447 (gl->gl_state == LM_ST_EXCLUSIVE) && in inode_go_lock()
468 static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl, in inode_go_dump() argument
471 struct gfs2_inode *ip = gl->gl_object; in inode_go_dump()
499 static void freeze_go_sync(struct gfs2_glock *gl) in freeze_go_sync() argument
502 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in freeze_go_sync()
504 if (gl->gl_state == LM_ST_SHARED && in freeze_go_sync()
525 static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh) in freeze_go_xmote_bh() argument
527 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in freeze_go_xmote_bh()
558 static int freeze_go_demote_ok(const struct gfs2_glock *gl) in freeze_go_demote_ok() argument
569 static void iopen_go_callback(struct gfs2_glock *gl, bool remote) in iopen_go_callback() argument
571 struct gfs2_inode *ip = gl->gl_object; in iopen_go_callback()
572 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in iopen_go_callback()
577 if (gl->gl_demote_state == LM_ST_UNLOCKED && in iopen_go_callback()
578 gl->gl_state == LM_ST_SHARED && ip) { in iopen_go_callback()
579 gl->gl_lockref.count++; in iopen_go_callback()
580 if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0) in iopen_go_callback()
581 gl->gl_lockref.count--; in iopen_go_callback()
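iopen_go_callback() closes the listing with a reference-counted work hand-off: on a remote demote request (gl_demote_state of LM_ST_UNLOCKED while the glock is held SHARED over a live inode), it takes an extra gl_lockref reference on behalf of the gl_delete work item, and gives it back when queue_work() returns 0 because the work was already pending. A userspace analog of take-ref-then-queue; all names are hypothetical:

#include <stdbool.h>
#include <stdio.h>

struct gl_ref {
        int count;          /* stands in for gl->gl_lockref.count */
        bool work_queued;   /* stands in for the gl_delete work item */
};

/* Pretend queue_work(): returns false if the work was already pending,
 * just as queue_work() returns 0 for an already-queued work item. */
static bool fake_queue_work(struct gl_ref *gl)
{
        if (gl->work_queued)
                return false;
        gl->work_queued = true;
        return true;
}

static void remote_callback(struct gl_ref *gl)
{
        gl->count++;                  /* ref held on behalf of the work */
        if (!fake_queue_work(gl))
                gl->count--;          /* already queued: give the ref back */
}

int main(void)
{
        struct gl_ref gl = { .count = 1, .work_queued = false };

        remote_callback(&gl);         /* queues work, keeps the extra ref */
        remote_callback(&gl);         /* already queued, ref is dropped   */
        printf("refcount = %d\n", gl.count);   /* prints 2 */
        return 0;
}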