Searched refs:gl (Results 1 – 25 of 43) sorted by relevance


/Linux-v4.19/fs/gfs2/
glock.c
57 struct gfs2_glock *gl; /* current glock struct */ member
61 typedef void (*glock_examiner) (struct gfs2_glock * gl);
63 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
119 static void wake_up_glock(struct gfs2_glock *gl) in wake_up_glock() argument
121 wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name); in wake_up_glock()
124 __wake_up(wq, TASK_NORMAL, 1, &gl->gl_name); in wake_up_glock()
129 struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu); in gfs2_glock_dealloc() local
131 if (gl->gl_ops->go_flags & GLOF_ASPACE) { in gfs2_glock_dealloc()
132 kmem_cache_free(gfs2_glock_aspace_cachep, gl); in gfs2_glock_dealloc()
134 kfree(gl->gl_lksb.sb_lvbptr); in gfs2_glock_dealloc()
[all …]
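
The gfs2_glock_dealloc() hits above show the standard RCU-deferred free pattern: the callback recovers the enclosing object from its embedded rcu_head with container_of() and returns it to the matching slab cache. A minimal sketch of the same pattern, with hypothetical my_obj/my_cache names:

struct my_obj {
        struct rcu_head rcu;
        /* ... payload readable under rcu_read_lock() ... */
};

static struct kmem_cache *my_cache;

static void my_obj_dealloc(struct rcu_head *rcu)
{
        struct my_obj *obj = container_of(rcu, struct my_obj, rcu);

        kmem_cache_free(my_cache, obj);
}

static void my_obj_free(struct my_obj *obj)
{
        /* defer the free until all current RCU readers are done */
        call_rcu(&obj->rcu, my_obj_dealloc);
}
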
glops.c
34 static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh) in gfs2_ail_error() argument
36 fs_err(gl->gl_name.ln_sbd, in gfs2_ail_error()
41 fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n", in gfs2_ail_error()
42 gl->gl_name.ln_type, gl->gl_name.ln_number, in gfs2_ail_error()
43 gfs2_glock2aspace(gl)); in gfs2_ail_error()
44 gfs2_lm_withdraw(gl->gl_name.ln_sbd, "AIL error\n"); in gfs2_ail_error()
55 static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync, in __gfs2_ail_flush() argument
58 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in __gfs2_ail_flush()
59 struct list_head *head = &gl->gl_ail_list; in __gfs2_ail_flush()
73 gfs2_ail_error(gl, bh); in __gfs2_ail_flush()
[all …]
glock.h
131 void (*lm_put_lock) (struct gfs2_glock *gl);
132 int (*lm_lock) (struct gfs2_glock *gl, unsigned int req_state,
134 void (*lm_cancel) (struct gfs2_glock *gl);
139 static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl) in gfs2_glock_is_locked_by_me() argument
145 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_is_locked_by_me()
147 list_for_each_entry(gh, &gl->gl_holders, gh_list) { in gfs2_glock_is_locked_by_me()
155 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_is_locked_by_me()
160 static inline int gfs2_glock_is_held_excl(struct gfs2_glock *gl) in gfs2_glock_is_held_excl() argument
162 return gl->gl_state == LM_ST_EXCLUSIVE; in gfs2_glock_is_held_excl()
165 static inline int gfs2_glock_is_held_dfrd(struct gfs2_glock *gl) in gfs2_glock_is_held_dfrd() argument
[all …]
lock_dlm.c
72 static inline void gfs2_update_reply_times(struct gfs2_glock *gl) in gfs2_update_reply_times() argument
75 const unsigned gltype = gl->gl_name.ln_type; in gfs2_update_reply_times()
76 unsigned index = test_bit(GLF_BLOCKING, &gl->gl_flags) ? in gfs2_update_reply_times()
81 rtt = ktime_to_ns(ktime_sub(ktime_get_real(), gl->gl_dstamp)); in gfs2_update_reply_times()
82 lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats); in gfs2_update_reply_times()
83 gfs2_update_stats(&gl->gl_stats, index, rtt); /* Local */ in gfs2_update_reply_times()
87 trace_gfs2_glock_lock_time(gl, rtt); in gfs2_update_reply_times()
99 static inline void gfs2_update_request_times(struct gfs2_glock *gl) in gfs2_update_request_times() argument
102 const unsigned gltype = gl->gl_name.ln_type; in gfs2_update_request_times()
107 dstamp = gl->gl_dstamp; in gfs2_update_request_times()
[all …]
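
gfs2_update_reply_times() derives the DLM round-trip time from the dispatch stamp (gl_dstamp) taken when the request was sent. The core of the measurement, sketched; treat the GFS2_LKS_SRTT stats-slot choice here as illustrative:

        ktime_t send_stamp = ktime_get_real();  /* recorded at request time */
        s64 rtt;

        /* ... submit the DLM request, wait for the reply ... */

        rtt = ktime_to_ns(ktime_sub(ktime_get_real(), send_stamp));
        gfs2_update_stats(&gl->gl_stats, GFS2_LKS_SRTT, rtt);
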
trace_gfs2.h
93 TP_PROTO(const struct gfs2_glock *gl, unsigned int new_state),
95 TP_ARGS(gl, new_state),
109 __entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev;
110 __entry->glnum = gl->gl_name.ln_number;
111 __entry->gltype = gl->gl_name.ln_type;
112 __entry->cur_state = glock_trace_state(gl->gl_state);
114 __entry->tgt_state = glock_trace_state(gl->gl_target);
115 __entry->dmt_state = glock_trace_state(gl->gl_demote_state);
116 __entry->flags = gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
132 TP_PROTO(const struct gfs2_glock *gl),
[all …]
main.c
53 struct gfs2_glock *gl = foo; in gfs2_init_glock_once() local
55 spin_lock_init(&gl->gl_lockref.lock); in gfs2_init_glock_once()
56 INIT_LIST_HEAD(&gl->gl_holders); in gfs2_init_glock_once()
57 INIT_LIST_HEAD(&gl->gl_lru); in gfs2_init_glock_once()
58 INIT_LIST_HEAD(&gl->gl_ail_list); in gfs2_init_glock_once()
59 atomic_set(&gl->gl_ail_count, 0); in gfs2_init_glock_once()
60 atomic_set(&gl->gl_revokes, 0); in gfs2_init_glock_once()
65 struct gfs2_glock *gl = foo; in gfs2_init_gl_aspace_once() local
66 struct address_space *mapping = (struct address_space *)(gl + 1); in gfs2_init_gl_aspace_once()
68 gfs2_init_glock_once(gl); in gfs2_init_gl_aspace_once()
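
gfs2_init_glock_once() is a slab constructor: it runs once per object when a new slab page is populated, not on every allocation, so it may only initialize state that allocate/free cycles keep invariant (spinlocks, list heads, counters that return to zero). A sketch of how such a constructor is registered; the cache name and flags are illustrative:

static struct kmem_cache *glock_cachep;

static int __init my_init(void)
{
        /* the ctor argument runs once per object as slab pages are created */
        glock_cachep = kmem_cache_create("gfs2_glock",
                                         sizeof(struct gfs2_glock),
                                         0, SLAB_HWCACHE_ALIGN,
                                         gfs2_init_glock_once);
        if (!glock_cachep)
                return -ENOMEM;
        return 0;
}
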
meta_io.c
113 struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create) in gfs2_getbuf() argument
115 struct address_space *mapping = gfs2_glock2aspace(gl); in gfs2_getbuf()
116 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_getbuf()
181 struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno) in gfs2_meta_new() argument
184 bh = gfs2_getbuf(gl, blkno, CREATE); in gfs2_meta_new()
250 int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags, in gfs2_meta_read() argument
253 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_meta_read()
262 *bhp = bh = gfs2_getbuf(gl, blkno, CREATE); in gfs2_meta_read()
275 bh = gfs2_getbuf(gl, blkno + 1, CREATE); in gfs2_meta_read()
409 struct gfs2_glock *gl = ip->i_gl; in gfs2_meta_indirect_buffer() local
[all …]
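
gfs2_meta_read() looks the buffer up in (or adds it to) the glock's address space and reads it from disk; per the gfs2_rgrp_bh_get() hit below, the final two arguments are a readahead block count and the output buffer_head. A hedged usage sketch, with blkno standing in for a real disk address:

        struct buffer_head *bh;
        int error;

        /* DIO_WAIT: sleep until the read completes; no readahead */
        error = gfs2_meta_read(ip->i_gl, blkno, DIO_WAIT, 0, &bh);
        if (error)
                return error;
        /* ... examine bh->b_data while the glock is held ... */
        brelse(bh);
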
trans.c
126 static struct gfs2_bufdata *gfs2_alloc_bufdata(struct gfs2_glock *gl, in gfs2_alloc_bufdata() argument
134 bd->bd_gl = gl; in gfs2_alloc_bufdata()
155 void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh) in gfs2_trans_add_data() argument
158 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_trans_add_data()
172 bd = gfs2_alloc_bufdata(gl, bh, &gfs2_databuf_lops); in gfs2_trans_add_data()
178 gfs2_assert(sdp, bd->bd_gl == gl); in gfs2_trans_add_data()
192 void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh) in gfs2_trans_add_meta() argument
195 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_trans_add_meta()
213 bd = gfs2_alloc_bufdata(gl, bh, &gfs2_buf_lops); in gfs2_trans_add_meta()
220 gfs2_assert(sdp, bd->bd_gl == gl); in gfs2_trans_add_meta()
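
gfs2_trans_add_data()/gfs2_trans_add_meta() attach a buffer to the running transaction, allocating a gfs2_bufdata on first use so the log code can write and later revoke the block. A hedged sketch of the calling pattern; the RES_DINODE reservation size is illustrative:

static int touch_meta(struct gfs2_inode *ip, struct buffer_head *bh)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        int error;

        error = gfs2_trans_begin(sdp, RES_DINODE, 0);
        if (error)
                return error;

        gfs2_trans_add_meta(ip->i_gl, bh);  /* pin bh into the transaction */
        /* ... modify the buffer contents ... */

        gfs2_trans_end(sdp);
        return 0;
}
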
lops.c
73 struct gfs2_glock *gl = bd->bd_gl; in maybe_release_space() local
74 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in maybe_release_space()
75 struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl); in maybe_release_space()
76 unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number; in maybe_release_space()
119 struct gfs2_glock *gl = bd->bd_gl; in gfs2_unpin() local
120 list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list); in gfs2_unpin()
121 atomic_inc(&gl->gl_ail_count); in gfs2_unpin()
532 struct gfs2_glock *gl = ip->i_gl; in buf_lo_scan_elements() local
555 bh_ip = gfs2_meta_new(gl, blkno); in buf_lo_scan_elements()
581 static void gfs2_meta_sync(struct gfs2_glock *gl) in gfs2_meta_sync() argument
[all …]
incore.h
40 typedef void (*gfs2_glop_bh_t) (struct gfs2_glock *gl, unsigned int ret);
241 void (*go_sync) (struct gfs2_glock *gl);
242 int (*go_xmote_bh) (struct gfs2_glock *gl, struct gfs2_holder *gh);
243 void (*go_inval) (struct gfs2_glock *gl, int flags);
244 int (*go_demote_ok) (const struct gfs2_glock *gl);
247 void (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl);
248 void (*go_callback)(struct gfs2_glock *gl, bool remote);
863 static inline void gfs2_glstats_inc(struct gfs2_glock *gl, int which) in gfs2_glstats_inc() argument
865 gl->gl_stats.stats[which]++; in gfs2_glstats_inc()
868 static inline void gfs2_sbstats_inc(const struct gfs2_glock *gl, int which) in gfs2_sbstats_inc() argument
[all …]
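
The go_* pointers above form the per-type glock operations table; each glock type (inode, resource group, iopen, ...) supplies its own in glops.c. A sketch of the shape, with assumed callback and table names:

static void my_go_sync(struct gfs2_glock *gl)
{
        /* write back dirty data/metadata guarded by this glock */
}

static void my_go_inval(struct gfs2_glock *gl, int flags)
{
        /* drop cached state once the lock is demoted or released */
}

static const struct gfs2_glock_operations my_glops = {
        .go_sync  = my_go_sync,
        .go_inval = my_go_inval,
        .go_type  = LM_TYPE_INODE,
};
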
meta_io.h
54 extern struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno);
55 extern int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
58 extern struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno,
76 struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen);
trans.h
44 extern void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh);
45 extern void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh);
rgrp.c
718 struct gfs2_glock *gl; in gfs2_clear_rgrpd() local
722 gl = rgd->rd_gl; in gfs2_clear_rgrpd()
726 if (gl) { in gfs2_clear_rgrpd()
727 glock_clear_object(gl, rgd); in gfs2_clear_rgrpd()
728 gfs2_glock_put(gl); in gfs2_clear_rgrpd()
1028 struct gfs2_glock *gl = ip->i_gl; in gfs2_rindex_update() local
1035 if (!gfs2_glock_is_locked_by_me(gl)) { in gfs2_rindex_update()
1036 error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh); in gfs2_rindex_update()
1152 struct gfs2_glock *gl = rgd->rd_gl; in gfs2_rgrp_bh_get() local
1163 error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, 0, &bi->bi_bh); in gfs2_rgrp_bh_get()
[all …]
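
The gfs2_rindex_update() hit shows the usual conditional-acquire idiom around gfs2_glock_is_locked_by_me(): take the glock in shared mode only if this task does not already hold it, and remember whether it must be dropped. Sketched:

        struct gfs2_holder ri_gh;
        int unlock_required = 0;
        int error;

        if (!gfs2_glock_is_locked_by_me(gl)) {
                error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
                if (error)
                        return error;
                unlock_required = 1;
        }

        /* ... read the rindex under the shared lock ... */

        if (unlock_required)
                gfs2_glock_dq_uninit(&ri_gh);
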
super.c
1328 struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl; in gfs2_drop_inode() local
1329 if (test_bit(GLF_DEMOTE, &gl->gl_flags)) in gfs2_drop_inode()
1341 struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl; in gfs2_drop_inode() local
1343 gfs2_glock_hold(gl); in gfs2_drop_inode()
1344 if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0) in gfs2_drop_inode()
1345 gfs2_glock_queue_put(gl); in gfs2_drop_inode()
1472 struct gfs2_glock *gl = ip->i_gl; in gfs2_final_release_pages() local
1477 if (atomic_read(&gl->gl_revokes) == 0) { in gfs2_final_release_pages()
1478 clear_bit(GLF_LFLUSH, &gl->gl_flags); in gfs2_final_release_pages()
1479 clear_bit(GLF_DIRTY, &gl->gl_flags); in gfs2_final_release_pages()
[all …]
log.c
100 struct gfs2_glock *gl = NULL; in gfs2_ail1_start_one() local
121 if (gl == bd->bd_gl) in gfs2_ail1_start_one()
123 gl = bd->bd_gl; in gfs2_ail1_start_one()
599 struct gfs2_glock *gl = bd->bd_gl; in gfs2_add_revoke() local
607 atomic_inc(&gl->gl_revokes); in gfs2_add_revoke()
608 set_bit(GLF_LFLUSH, &gl->gl_flags); in gfs2_add_revoke()
776 void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags) in gfs2_log_flush() argument
784 if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags)) { in gfs2_log_flush()
glops.h
28 extern void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync);
log.h
75 extern void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
sys.c
231 struct gfs2_glock *gl; in demote_rq_store() local
266 rv = gfs2_glock_get(sdp, glnum, glops, 0, &gl); in demote_rq_store()
269 gfs2_glock_cb(gl, glmode); in demote_rq_store()
270 gfs2_glock_put(gl); in demote_rq_store()
/Linux-v4.19/drivers/target/iscsi/cxgbit/
cxgbit_main.c
214 cxgbit_copy_frags(struct sk_buff *skb, const struct pkt_gl *gl, in cxgbit_copy_frags() argument
221 __skb_fill_page_desc(skb, skb_frag_idx, gl->frags[0].page, in cxgbit_copy_frags()
222 gl->frags[0].offset + offset, in cxgbit_copy_frags()
223 gl->frags[0].size - offset); in cxgbit_copy_frags()
224 for (i = 1; i < gl->nfrags; i++) in cxgbit_copy_frags()
226 gl->frags[i].page, in cxgbit_copy_frags()
227 gl->frags[i].offset, in cxgbit_copy_frags()
228 gl->frags[i].size); in cxgbit_copy_frags()
230 skb_shinfo(skb)->nr_frags += gl->nfrags; in cxgbit_copy_frags()
233 get_page(gl->frags[gl->nfrags - 1].page); in cxgbit_copy_frags()
[all …]
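
The copy_frags() variants in these results attach the firmware gather list (struct pkt_gl) to an skb as page fragments instead of copying the payload; only the last page needs an extra reference because it stays on the adapter's free list. A hedged sketch following the cxgb4/cxgb4vf shape, which sets nr_frags outright (the cxgbit variant above instead appends at an existing fragment index):

static void attach_gl_frags(struct sk_buff *skb, const struct pkt_gl *gl,
                            unsigned int offset)
{
        unsigned int i;

        /* first fragment: skip the 'offset' bytes already copied linearly */
        __skb_fill_page_desc(skb, 0, gl->frags[0].page,
                             gl->frags[0].offset + offset,
                             gl->frags[0].size - offset);
        skb_shinfo(skb)->nr_frags = gl->nfrags;
        for (i = 1; i < gl->nfrags; i++)
                __skb_fill_page_desc(skb, i, gl->frags[i].page,
                                     gl->frags[i].offset,
                                     gl->frags[i].size);

        /* the last page is still owned by the free list; take a reference */
        get_page(gl->frags[gl->nfrags - 1].page);
}
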
/Linux-v4.19/drivers/net/ethernet/chelsio/cxgb4vf/
sge.c
1473 const struct pkt_gl *gl, in copy_frags() argument
1479 __skb_fill_page_desc(skb, 0, gl->frags[0].page, in copy_frags()
1480 gl->frags[0].offset + offset, in copy_frags()
1481 gl->frags[0].size - offset); in copy_frags()
1482 skb_shinfo(skb)->nr_frags = gl->nfrags; in copy_frags()
1483 for (i = 1; i < gl->nfrags; i++) in copy_frags()
1484 __skb_fill_page_desc(skb, i, gl->frags[i].page, in copy_frags()
1485 gl->frags[i].offset, in copy_frags()
1486 gl->frags[i].size); in copy_frags()
1489 get_page(gl->frags[gl->nfrags - 1].page); in copy_frags()
[all …]
/Linux-v4.19/drivers/crypto/chelsio/chtls/
chtls_main.c
316 static struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl, in copy_gl_to_skb_pkt() argument
326 skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) in copy_gl_to_skb_pkt()
330 __skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) in copy_gl_to_skb_pkt()
335 , gl->va + pktshift, in copy_gl_to_skb_pkt()
336 gl->tot_len - pktshift); in copy_gl_to_skb_pkt()
342 const struct pkt_gl *gl, const __be64 *rsp) in chtls_recv_packet() argument
348 skb = copy_gl_to_skb_pkt(gl, rsp, cdev->lldi->sge_pktshift); in chtls_recv_packet()
416 const struct pkt_gl *gl) in chtls_uld_rx_handler() argument
425 if (chtls_recv_packet(cdev, gl, rsp) < 0) in chtls_uld_rx_handler()
430 if (!gl) in chtls_uld_rx_handler()
[all …]
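
copy_gl_to_skb_pkt() takes the opposite, copy-based approach for control-path packets: linearize the whole gather list into a fresh skb, skipping pktshift bytes of hardware header. A simplified hedged sketch (the real helpers also reserve room for a struct cpl_pass_accept_req ahead of the payload):

static struct sk_buff *gl_to_skb(const struct pkt_gl *gl,
                                 unsigned int pktshift)
{
        struct sk_buff *skb;

        skb = alloc_skb(gl->tot_len - pktshift, GFP_ATOMIC);
        if (!skb)
                return NULL;

        __skb_put(skb, gl->tot_len - pktshift);
        skb_copy_to_linear_data(skb, gl->va + pktshift,
                                gl->tot_len - pktshift);
        return skb;
}
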
/Linux-v4.19/drivers/net/ethernet/chelsio/cxgb4/
sge.c
2541 const struct pkt_gl *gl, unsigned int offset) in copy_frags() argument
2546 __skb_fill_page_desc(skb, 0, gl->frags[0].page, in copy_frags()
2547 gl->frags[0].offset + offset, in copy_frags()
2548 gl->frags[0].size - offset); in copy_frags()
2549 skb_shinfo(skb)->nr_frags = gl->nfrags; in copy_frags()
2550 for (i = 1; i < gl->nfrags; i++) in copy_frags()
2551 __skb_fill_page_desc(skb, i, gl->frags[i].page, in copy_frags()
2552 gl->frags[i].offset, in copy_frags()
2553 gl->frags[i].size); in copy_frags()
2556 get_page(gl->frags[gl->nfrags - 1].page); in copy_frags()
[all …]
cxgb4_uld.h
376 const struct pkt_gl *gl);
380 const struct pkt_gl *gl,
409 struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
/Linux-v4.19/drivers/infiniband/hw/cxgb4/
device.c
1096 static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl, in copy_gl_to_skb_pkt() argument
1109 skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) + in copy_gl_to_skb_pkt()
1114 __skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) + in copy_gl_to_skb_pkt()
1129 gl->va + pktshift, in copy_gl_to_skb_pkt()
1130 gl->tot_len - pktshift); in copy_gl_to_skb_pkt()
1134 static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl, in recv_rx_pkt() argument
1143 skb = copy_gl_to_skb_pkt(gl , rsp, dev->rdev.lldi.sge_pktshift); in recv_rx_pkt()
1159 const struct pkt_gl *gl) in c4iw_uld_rx_handler() argument
1166 if (gl == NULL) { in c4iw_uld_rx_handler()
1175 } else if (gl == CXGB4_MSG_AN) { in c4iw_uld_rx_handler()
[all …]
/Linux-v4.19/drivers/iommu/
intel-svm.c
157 unsigned long address, unsigned long pages, int ih, int gl) in intel_flush_svm_range_dev() argument
165 if (gl) in intel_flush_svm_range_dev()
177 desc.high = QI_EIOTLB_ADDR(address) | QI_EIOTLB_GL(gl) | in intel_flush_svm_range_dev()
203 unsigned long pages, int ih, int gl) in intel_flush_svm_range() argument
214 intel_flush_svm_range_dev(svm, sdev, address, pages, ih, gl); in intel_flush_svm_range()
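
intel_flush_svm_range() fans a page-selective extended-IOTLB invalidation out to every device bound to the SVM; ih and gl are hint bits placed into the descriptor (invalidation hint and global-mapping flag). A hedged caller sketch; flush_after_unmap and the flag choices are illustrative:

static void flush_after_unmap(struct intel_svm *svm,
                              unsigned long start, unsigned long end)
{
        unsigned long pages = (end - start) >> VTD_PAGE_SHIFT;

        /* ih=0: paging entries changed; gl=0: non-global mappings */
        intel_flush_svm_range(svm, start, pages, 0, 0);
}
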
