Lines matching refs: mdsc (fs/ceph/caps.c)
45 static u64 __get_oldest_flush_tid(struct ceph_mds_client *mdsc);
46 static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
127 void ceph_caps_init(struct ceph_mds_client *mdsc) in ceph_caps_init() argument
129 INIT_LIST_HEAD(&mdsc->caps_list); in ceph_caps_init()
130 spin_lock_init(&mdsc->caps_list_lock); in ceph_caps_init()
133 void ceph_caps_finalize(struct ceph_mds_client *mdsc) in ceph_caps_finalize() argument
137 spin_lock(&mdsc->caps_list_lock); in ceph_caps_finalize()
138 while (!list_empty(&mdsc->caps_list)) { in ceph_caps_finalize()
139 cap = list_first_entry(&mdsc->caps_list, in ceph_caps_finalize()
144 mdsc->caps_total_count = 0; in ceph_caps_finalize()
145 mdsc->caps_avail_count = 0; in ceph_caps_finalize()
146 mdsc->caps_use_count = 0; in ceph_caps_finalize()
147 mdsc->caps_reserve_count = 0; in ceph_caps_finalize()
148 mdsc->caps_min_count = 0; in ceph_caps_finalize()
149 spin_unlock(&mdsc->caps_list_lock); in ceph_caps_finalize()
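
The ceph_caps_init()/ceph_caps_finalize() hits above set up and tear down the per-mdsc cap pool: a list of preallocated caps guarded by caps_list_lock plus a set of counters. As a reading aid, here is a minimal userspace model of that state and of the invariant the BUG_ON()s later in this listing keep asserting (caps_total_count == caps_use_count + caps_reserve_count + caps_avail_count). The names mirror the kernel fields, but this is only an illustrative sketch: a pthread mutex stands in for the spinlock and a singly linked list for the list_head.

/*
 * Illustrative userspace model of the caps pool accounting seen in this
 * listing. Not kernel code: a pthread mutex stands in for caps_list_lock
 * and a singly linked list for the list_head caps_list.
 */
#include <assert.h>
#include <pthread.h>
#include <stdlib.h>

struct cap {                    /* stand-in for struct ceph_cap */
	struct cap *next;       /* stand-in for cap->caps_item */
};

struct caps_pool {              /* stand-in for the mdsc->caps_* fields */
	pthread_mutex_t lock;   /* caps_list_lock */
	struct cap *free_list;  /* caps_list: unused, preallocated caps */
	int total;              /* caps_total_count */
	int avail;              /* caps_avail_count (cached, unreserved) */
	int used;               /* caps_use_count   (handed out to inodes) */
	int reserved;           /* caps_reserve_count (cached, reserved) */
	int min;                /* caps_min_count */
	int use_max;            /* caps_use_max, 0 = unlimited */
};

/* ceph_caps_init() analogue: empty list, zeroed counters. */
static void pool_init(struct caps_pool *p)
{
	pthread_mutex_init(&p->lock, NULL);
	p->free_list = NULL;
	p->total = p->avail = p->used = p->reserved = p->min = 0;
	p->use_max = 0;
}

/* ceph_caps_finalize() analogue: drop every cached cap, reset counters. */
static void pool_finalize(struct caps_pool *p)
{
	pthread_mutex_lock(&p->lock);
	while (p->free_list) {
		struct cap *c = p->free_list;
		p->free_list = c->next;
		free(c);
	}
	p->total = p->avail = p->used = p->reserved = p->min = 0;
	pthread_mutex_unlock(&p->lock);
}

/* The invariant the BUG_ON()s in this listing keep re-checking. */
static void pool_check(const struct caps_pool *p)
{
	assert(p->total == p->used + p->reserved + p->avail);
}
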
152 void ceph_adjust_caps_max_min(struct ceph_mds_client *mdsc, in ceph_adjust_caps_max_min() argument
155 spin_lock(&mdsc->caps_list_lock); in ceph_adjust_caps_max_min()
156 mdsc->caps_min_count = fsopt->max_readdir; in ceph_adjust_caps_max_min()
157 if (mdsc->caps_min_count < 1024) in ceph_adjust_caps_max_min()
158 mdsc->caps_min_count = 1024; in ceph_adjust_caps_max_min()
159 mdsc->caps_use_max = fsopt->caps_max; in ceph_adjust_caps_max_min()
160 if (mdsc->caps_use_max > 0 && in ceph_adjust_caps_max_min()
161 mdsc->caps_use_max < mdsc->caps_min_count) in ceph_adjust_caps_max_min()
162 mdsc->caps_use_max = mdsc->caps_min_count; in ceph_adjust_caps_max_min()
163 spin_unlock(&mdsc->caps_list_lock); in ceph_adjust_caps_max_min()
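
ceph_adjust_caps_max_min() clamps the configured limits: caps_min_count follows max_readdir but never drops below 1024, and a non-zero caps_use_max is never allowed to fall below caps_min_count. Continuing the sketch above (the helper name is made up):

/* ceph_adjust_caps_max_min() analogue, continuing the sketch above. */
static void pool_adjust_max_min(struct caps_pool *p, int max_readdir, int caps_max)
{
	pthread_mutex_lock(&p->lock);
	p->min = max_readdir > 1024 ? max_readdir : 1024;
	p->use_max = caps_max;                /* 0 means "no cap limit" */
	if (p->use_max > 0 && p->use_max < p->min)
		p->use_max = p->min;
	pthread_mutex_unlock(&p->lock);
}
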
166 static void __ceph_unreserve_caps(struct ceph_mds_client *mdsc, int nr_caps) in __ceph_unreserve_caps() argument
172 BUG_ON(mdsc->caps_reserve_count < nr_caps); in __ceph_unreserve_caps()
173 mdsc->caps_reserve_count -= nr_caps; in __ceph_unreserve_caps()
174 if (mdsc->caps_avail_count >= in __ceph_unreserve_caps()
175 mdsc->caps_reserve_count + mdsc->caps_min_count) { in __ceph_unreserve_caps()
176 mdsc->caps_total_count -= nr_caps; in __ceph_unreserve_caps()
178 cap = list_first_entry(&mdsc->caps_list, in __ceph_unreserve_caps()
184 mdsc->caps_avail_count += nr_caps; in __ceph_unreserve_caps()
189 mdsc->caps_total_count, mdsc->caps_use_count, in __ceph_unreserve_caps()
190 mdsc->caps_reserve_count, mdsc->caps_avail_count); in __ceph_unreserve_caps()
191 BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count + in __ceph_unreserve_caps()
192 mdsc->caps_reserve_count + in __ceph_unreserve_caps()
193 mdsc->caps_avail_count); in __ceph_unreserve_caps()
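
__ceph_unreserve_caps() gives back an unused reservation: the reserved count always drops by nr_caps, and the backing caps are either freed (shrinking caps_total_count) or left cached as available, depending on whether the pool already holds enough spares (avail >= reserve + min). A continuation of the sketch, same caveats:

/* __ceph_unreserve_caps() analogue, continuing the sketch above.
 * Caller holds p->lock (the kernel variant runs under caps_list_lock). */
static void pool_unreserve(struct caps_pool *p, int nr)
{
	assert(p->reserved >= nr);
	p->reserved -= nr;
	if (p->avail >= p->reserved + p->min) {
		/* Enough spare caps already cached: shrink the pool. */
		p->total -= nr;
		while (nr--) {
			struct cap *c = p->free_list;
			p->free_list = c->next;
			free(c);
		}
	} else {
		/* Keep the caps cached as available for later use. */
		p->avail += nr;
	}
	pool_check(p);
}
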
200 int ceph_reserve_caps(struct ceph_mds_client *mdsc, in ceph_reserve_caps() argument
216 spin_lock(&mdsc->caps_list_lock); in ceph_reserve_caps()
217 if (mdsc->caps_avail_count >= need) in ceph_reserve_caps()
220 have = mdsc->caps_avail_count; in ceph_reserve_caps()
221 mdsc->caps_avail_count -= have; in ceph_reserve_caps()
222 mdsc->caps_reserve_count += have; in ceph_reserve_caps()
223 BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count + in ceph_reserve_caps()
224 mdsc->caps_reserve_count + in ceph_reserve_caps()
225 mdsc->caps_avail_count); in ceph_reserve_caps()
226 spin_unlock(&mdsc->caps_list_lock); in ceph_reserve_caps()
238 for (j = 0; j < mdsc->max_sessions; j++) { in ceph_reserve_caps()
239 s = __ceph_lookup_mds_session(mdsc, j); in ceph_reserve_caps()
242 mutex_unlock(&mdsc->mutex); in ceph_reserve_caps()
246 ceph_trim_caps(mdsc, s, max_caps); in ceph_reserve_caps()
250 mutex_lock(&mdsc->mutex); in ceph_reserve_caps()
254 spin_lock(&mdsc->caps_list_lock); in ceph_reserve_caps()
255 if (mdsc->caps_avail_count) { in ceph_reserve_caps()
257 if (mdsc->caps_avail_count >= need - i) in ceph_reserve_caps()
260 more_have = mdsc->caps_avail_count; in ceph_reserve_caps()
264 mdsc->caps_avail_count -= more_have; in ceph_reserve_caps()
265 mdsc->caps_reserve_count += more_have; in ceph_reserve_caps()
268 spin_unlock(&mdsc->caps_list_lock); in ceph_reserve_caps()
285 spin_lock(&mdsc->caps_list_lock); in ceph_reserve_caps()
286 mdsc->caps_total_count += alloc; in ceph_reserve_caps()
287 mdsc->caps_reserve_count += alloc; in ceph_reserve_caps()
288 list_splice(&newcaps, &mdsc->caps_list); in ceph_reserve_caps()
290 BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count + in ceph_reserve_caps()
291 mdsc->caps_reserve_count + in ceph_reserve_caps()
292 mdsc->caps_avail_count); in ceph_reserve_caps()
295 __ceph_unreserve_caps(mdsc, have + alloc); in ceph_reserve_caps()
297 spin_unlock(&mdsc->caps_list_lock); in ceph_reserve_caps()
300 ctx, mdsc->caps_total_count, mdsc->caps_use_count, in ceph_reserve_caps()
301 mdsc->caps_reserve_count, mdsc->caps_avail_count); in ceph_reserve_caps()
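
ceph_reserve_caps() builds a reservation for an upcoming operation: it first moves whatever is already available into the reserved count, asks the MDS sessions to trim caps if the pool is over its limit, allocates the remainder, and rolls everything back via __ceph_unreserve_caps() if an allocation fails. A simplified continuation of the sketch that keeps only the accounting (the session-trimming step is omitted):

/* ceph_reserve_caps() analogue, continuing the sketch above. The kernel
 * version also calls ceph_trim_caps() on the MDS sessions before falling
 * back to allocation; that step is omitted here. */
static int pool_reserve(struct caps_pool *p, int need)
{
	int have, alloc = 0;

	pthread_mutex_lock(&p->lock);
	have = p->avail < need ? p->avail : need;  /* take what is cached */
	p->avail -= have;
	p->reserved += have;
	pool_check(p);
	pthread_mutex_unlock(&p->lock);

	while (have + alloc < need) {              /* allocate the rest */
		struct cap *c = calloc(1, sizeof(*c));
		if (!c)
			goto fail;
		pthread_mutex_lock(&p->lock);
		c->next = p->free_list;            /* splice onto the pool */
		p->free_list = c;
		p->total++;
		p->reserved++;
		pool_check(p);
		pthread_mutex_unlock(&p->lock);
		alloc++;
	}
	return 0;

fail:
	pthread_mutex_lock(&p->lock);
	pool_unreserve(p, have + alloc);           /* roll back partial work */
	pthread_mutex_unlock(&p->lock);
	return -1;                                 /* the kernel returns -ENOMEM */
}
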
305 void ceph_unreserve_caps(struct ceph_mds_client *mdsc, in ceph_unreserve_caps() argument
313 spin_lock(&mdsc->caps_list_lock); in ceph_unreserve_caps()
314 __ceph_unreserve_caps(mdsc, ctx->count); in ceph_unreserve_caps()
317 if (mdsc->caps_use_max > 0 && in ceph_unreserve_caps()
318 mdsc->caps_use_count > mdsc->caps_use_max) in ceph_unreserve_caps()
320 spin_unlock(&mdsc->caps_list_lock); in ceph_unreserve_caps()
323 ceph_reclaim_caps_nr(mdsc, ctx->used); in ceph_unreserve_caps()
326 struct ceph_cap *ceph_get_cap(struct ceph_mds_client *mdsc, in ceph_get_cap() argument
335 spin_lock(&mdsc->caps_list_lock); in ceph_get_cap()
336 mdsc->caps_use_count++; in ceph_get_cap()
337 mdsc->caps_total_count++; in ceph_get_cap()
338 spin_unlock(&mdsc->caps_list_lock); in ceph_get_cap()
340 spin_lock(&mdsc->caps_list_lock); in ceph_get_cap()
341 if (mdsc->caps_avail_count) { in ceph_get_cap()
342 BUG_ON(list_empty(&mdsc->caps_list)); in ceph_get_cap()
344 mdsc->caps_avail_count--; in ceph_get_cap()
345 mdsc->caps_use_count++; in ceph_get_cap()
346 cap = list_first_entry(&mdsc->caps_list, in ceph_get_cap()
350 BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count + in ceph_get_cap()
351 mdsc->caps_reserve_count + mdsc->caps_avail_count); in ceph_get_cap()
353 spin_unlock(&mdsc->caps_list_lock); in ceph_get_cap()
359 spin_lock(&mdsc->caps_list_lock); in ceph_get_cap()
361 ctx, ctx->count, mdsc->caps_total_count, mdsc->caps_use_count, in ceph_get_cap()
362 mdsc->caps_reserve_count, mdsc->caps_avail_count); in ceph_get_cap()
364 BUG_ON(ctx->count > mdsc->caps_reserve_count); in ceph_get_cap()
365 BUG_ON(list_empty(&mdsc->caps_list)); in ceph_get_cap()
369 mdsc->caps_reserve_count--; in ceph_get_cap()
370 mdsc->caps_use_count++; in ceph_get_cap()
372 cap = list_first_entry(&mdsc->caps_list, struct ceph_cap, caps_item); in ceph_get_cap()
375 BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count + in ceph_get_cap()
376 mdsc->caps_reserve_count + mdsc->caps_avail_count); in ceph_get_cap()
377 spin_unlock(&mdsc->caps_list_lock); in ceph_get_cap()
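
ceph_get_cap() hands one cap out of the pool. With a reservation context the cap comes off the reserved part of caps_list; without one, a fresh allocation is tried first and the cached pool is only a fallback. Continuing the sketch (a plain 'reserved' flag stands in for the ctx->count bookkeeping):

/* ceph_get_cap() analogue, continuing the sketch above. */
static struct cap *pool_get(struct caps_pool *p, int reserved)
{
	struct cap *c;

	if (!reserved) {
		c = calloc(1, sizeof(*c));         /* try a fresh allocation */
		pthread_mutex_lock(&p->lock);
		if (c) {
			p->used++;
			p->total++;
		} else if (p->avail > 0) {
			/* Allocation failed: fall back to a cached cap. */
			p->avail--;
			p->used++;
			c = p->free_list;
			p->free_list = c->next;
		}
		pool_check(p);
		pthread_mutex_unlock(&p->lock);
		return c;
	}

	pthread_mutex_lock(&p->lock);
	assert(p->reserved > 0 && p->free_list);
	p->reserved--;
	p->used++;
	c = p->free_list;          /* reserved caps live on the same list */
	p->free_list = c->next;
	pool_check(p);
	pthread_mutex_unlock(&p->lock);
	return c;
}
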
381 void ceph_put_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap) in ceph_put_cap() argument
383 spin_lock(&mdsc->caps_list_lock); in ceph_put_cap()
385 cap, mdsc->caps_total_count, mdsc->caps_use_count, in ceph_put_cap()
386 mdsc->caps_reserve_count, mdsc->caps_avail_count); in ceph_put_cap()
387 mdsc->caps_use_count--; in ceph_put_cap()
392 if (mdsc->caps_avail_count >= mdsc->caps_reserve_count + in ceph_put_cap()
393 mdsc->caps_min_count) { in ceph_put_cap()
394 mdsc->caps_total_count--; in ceph_put_cap()
397 mdsc->caps_avail_count++; in ceph_put_cap()
398 list_add(&cap->caps_item, &mdsc->caps_list); in ceph_put_cap()
401 BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count + in ceph_put_cap()
402 mdsc->caps_reserve_count + mdsc->caps_avail_count); in ceph_put_cap()
403 spin_unlock(&mdsc->caps_list_lock); in ceph_put_cap()
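
ceph_put_cap() is the reverse: the cap is either freed outright, when the pool already holds enough spares for the current reservations plus caps_min_count, or cached on caps_list for reuse. Continuing the sketch:

/* ceph_put_cap() analogue, continuing the sketch above. */
static void pool_put(struct caps_pool *p, struct cap *c)
{
	pthread_mutex_lock(&p->lock);
	p->used--;
	if (p->avail >= p->reserved + p->min) {
		p->total--;                /* enough spares: free it */
		free(c);
	} else {
		p->avail++;                /* otherwise cache it for reuse */
		c->next = p->free_list;
		p->free_list = c;
	}
	pool_check(p);
	pthread_mutex_unlock(&p->lock);
}
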
410 struct ceph_mds_client *mdsc = fsc->mdsc; in ceph_reservation_status() local
412 spin_lock(&mdsc->caps_list_lock); in ceph_reservation_status()
415 *total = mdsc->caps_total_count; in ceph_reservation_status()
417 *avail = mdsc->caps_avail_count; in ceph_reservation_status()
419 *used = mdsc->caps_use_count; in ceph_reservation_status()
421 *reserved = mdsc->caps_reserve_count; in ceph_reservation_status()
423 *min = mdsc->caps_min_count; in ceph_reservation_status()
425 spin_unlock(&mdsc->caps_list_lock); in ceph_reservation_status()
489 static void __cap_set_timeouts(struct ceph_mds_client *mdsc, in __cap_set_timeouts() argument
492 struct ceph_mount_options *opt = mdsc->fsc->mount_options; in __cap_set_timeouts()
507 static void __cap_delay_requeue(struct ceph_mds_client *mdsc, in __cap_delay_requeue() argument
512 if (!mdsc->stopping) { in __cap_delay_requeue()
513 spin_lock(&mdsc->cap_delay_lock); in __cap_delay_requeue()
519 __cap_set_timeouts(mdsc, ci); in __cap_delay_requeue()
520 list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list); in __cap_delay_requeue()
522 spin_unlock(&mdsc->cap_delay_lock); in __cap_delay_requeue()
531 static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc, in __cap_delay_requeue_front() argument
535 spin_lock(&mdsc->cap_delay_lock); in __cap_delay_requeue_front()
539 list_add(&ci->i_cap_delay_list, &mdsc->cap_delay_list); in __cap_delay_requeue_front()
540 spin_unlock(&mdsc->cap_delay_lock); in __cap_delay_requeue_front()
548 static void __cap_delay_cancel(struct ceph_mds_client *mdsc, in __cap_delay_cancel() argument
554 spin_lock(&mdsc->cap_delay_lock); in __cap_delay_cancel()
556 spin_unlock(&mdsc->cap_delay_lock); in __cap_delay_cancel()
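
__cap_set_timeouts(), __cap_delay_requeue(), __cap_delay_requeue_front() and __cap_delay_cancel() manage the delayed-cap-check queue: an inode is (re)queued at the tail with a fresh timeout, pushed to the front when it should be checked as soon as possible, or dropped from the queue. A small self-contained sketch of that queue discipline, with BSD TAILQ macros from <sys/queue.h> standing in for list_head (names are illustrative, and the kernel's flag handling is left out):

/* Sketch of the cap_delay_list discipline, not kernel code. */
#include <pthread.h>
#include <sys/queue.h>
#include <time.h>

struct delayed_inode {                       /* stand-in for ceph_inode_info */
	time_t hold_until;                   /* i_hold_caps_max analogue */
	TAILQ_ENTRY(delayed_inode) delay;    /* i_cap_delay_list analogue */
};

static pthread_mutex_t delay_lock = PTHREAD_MUTEX_INITIALIZER;  /* cap_delay_lock */
static TAILQ_HEAD(, delayed_inode) delay_list =                 /* cap_delay_list */
	TAILQ_HEAD_INITIALIZER(delay_list);

/* __cap_delay_requeue() analogue: fresh timeout, move to the tail so the
 * delayed work looks at this inode last. */
static void delay_requeue(struct delayed_inode *di, int delay_secs, int queued)
{
	pthread_mutex_lock(&delay_lock);
	if (queued)
		TAILQ_REMOVE(&delay_list, di, delay);
	di->hold_until = time(NULL) + delay_secs;   /* __cap_set_timeouts() */
	TAILQ_INSERT_TAIL(&delay_list, di, delay);
	pthread_mutex_unlock(&delay_lock);
}

/* __cap_delay_requeue_front() analogue: check this inode first. */
static void delay_requeue_front(struct delayed_inode *di, int queued)
{
	pthread_mutex_lock(&delay_lock);
	if (queued)
		TAILQ_REMOVE(&delay_list, di, delay);
	TAILQ_INSERT_HEAD(&delay_list, di, delay);
	pthread_mutex_unlock(&delay_lock);
}

/* __cap_delay_cancel() analogue: take the inode off the queue. */
static void delay_cancel(struct delayed_inode *di)
{
	pthread_mutex_lock(&delay_lock);
	TAILQ_REMOVE(&delay_list, di, delay);
	pthread_mutex_unlock(&delay_lock);
}
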
636 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; in ceph_add_cap() local
669 atomic64_inc(&mdsc->metric.total_caps); in ceph_add_cap()
704 struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc, in ceph_add_cap()
727 __cap_delay_requeue(mdsc, ci); in ceph_add_cap()
928 ceph_update_cap_hit(&fsc->mdsc->metric); in __ceph_caps_issued_mask_metric()
930 ceph_update_cap_mis(&fsc->mdsc->metric); in __ceph_caps_issued_mask_metric()
1109 struct ceph_mds_client *mdsc; in __ceph_remove_cap() local
1122 mdsc = ceph_inode_to_client(&ci->netfs.inode)->mdsc; in __ceph_remove_cap()
1138 atomic64_dec(&mdsc->metric.total_caps); in __ceph_remove_cap()
1165 ceph_put_cap(mdsc, cap); in __ceph_remove_cap()
1175 __cap_delay_cancel(mdsc, ci); in __ceph_remove_cap()
1533 struct ceph_mds_client *mdsc = session->s_mdsc; in __ceph_flush_snaps() local
1557 spin_lock(&mdsc->cap_dirty_lock); in __ceph_flush_snaps()
1558 capsnap->cap_flush.tid = ++mdsc->last_cap_flush_tid; in __ceph_flush_snaps()
1560 &mdsc->cap_flush_list); in __ceph_flush_snaps()
1562 oldest_flush_tid = __get_oldest_flush_tid(mdsc); in __ceph_flush_snaps()
1567 spin_unlock(&mdsc->cap_dirty_lock); in __ceph_flush_snaps()
1627 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; in ceph_flush_snaps() local
1653 mutex_lock(&mdsc->mutex); in ceph_flush_snaps()
1654 session = __ceph_lookup_mds_session(mdsc, mds); in ceph_flush_snaps()
1655 mutex_unlock(&mdsc->mutex); in ceph_flush_snaps()
1661 __kick_flushing_caps(mdsc, session, ci, 0); in ceph_flush_snaps()
1672 spin_lock(&mdsc->snap_flush_lock); in ceph_flush_snaps()
1674 spin_unlock(&mdsc->snap_flush_lock); in ceph_flush_snaps()
1685 struct ceph_mds_client *mdsc = in __ceph_mark_dirty_caps() local
1686 ceph_sb_to_client(ci->netfs.inode.i_sb)->mdsc; in __ceph_mark_dirty_caps()
1711 WARN_ON_ONCE(!rwsem_is_locked(&mdsc->snap_rwsem)); in __ceph_mark_dirty_caps()
1718 spin_lock(&mdsc->cap_dirty_lock); in __ceph_mark_dirty_caps()
1720 spin_unlock(&mdsc->cap_dirty_lock); in __ceph_mark_dirty_caps()
1732 __cap_delay_requeue(mdsc, ci); in __ceph_mark_dirty_caps()
1754 static u64 __get_oldest_flush_tid(struct ceph_mds_client *mdsc) in __get_oldest_flush_tid() argument
1756 if (!list_empty(&mdsc->cap_flush_list)) { in __get_oldest_flush_tid()
1758 list_first_entry(&mdsc->cap_flush_list, in __get_oldest_flush_tid()
1769 static bool __detach_cap_flush_from_mdsc(struct ceph_mds_client *mdsc, in __detach_cap_flush_from_mdsc() argument
1775 if (wake && cf->g_list.prev != &mdsc->cap_flush_list) { in __detach_cap_flush_from_mdsc()
1809 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; in __mark_caps_flushing() local
1832 spin_lock(&mdsc->cap_dirty_lock); in __mark_caps_flushing()
1835 cf->tid = ++mdsc->last_cap_flush_tid; in __mark_caps_flushing()
1836 list_add_tail(&cf->g_list, &mdsc->cap_flush_list); in __mark_caps_flushing()
1837 *oldest_flush_tid = __get_oldest_flush_tid(mdsc); in __mark_caps_flushing()
1841 mdsc->num_cap_flushing++; in __mark_caps_flushing()
1843 spin_unlock(&mdsc->cap_dirty_lock); in __mark_caps_flushing()
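
The flush paths above (__ceph_flush_snaps(), __get_oldest_flush_tid(), __detach_cap_flush_from_mdsc(), __mark_caps_flushing()) share one pattern: under cap_dirty_lock each new flush takes the next tid from last_cap_flush_tid and is appended to the global cap_flush_list, so the head of that list is always the oldest flush still in flight. A self-contained sketch of that ordering (TAILQ again standing in for list_head; the wake-flag hand-off in the kernel's detach helper is simplified to a return value):

/* Sketch of the global cap-flush tid ordering, not kernel code. */
#include <pthread.h>
#include <stdint.h>
#include <sys/queue.h>

struct cap_flush {                         /* stand-in for struct ceph_cap_flush */
	uint64_t tid;
	TAILQ_ENTRY(cap_flush) g_list;
};

static pthread_mutex_t dirty_lock = PTHREAD_MUTEX_INITIALIZER;  /* cap_dirty_lock */
static TAILQ_HEAD(, cap_flush) flush_list =                     /* cap_flush_list */
	TAILQ_HEAD_INITIALIZER(flush_list);
static uint64_t last_flush_tid;                                 /* last_cap_flush_tid */

/* __get_oldest_flush_tid() analogue: tid of the head entry, or 0. */
static uint64_t oldest_flush_tid(void)
{
	struct cap_flush *cf = TAILQ_FIRST(&flush_list);
	return cf ? cf->tid : 0;
}

/* __mark_caps_flushing()/__ceph_flush_snaps() tid assignment: each new
 * flush gets the next tid and goes to the tail, so the list stays sorted. */
static uint64_t start_flush(struct cap_flush *cf)
{
	uint64_t oldest;

	pthread_mutex_lock(&dirty_lock);
	cf->tid = ++last_flush_tid;
	TAILQ_INSERT_TAIL(&flush_list, cf, g_list);
	oldest = oldest_flush_tid();     /* what callers pass as oldest_flush_tid */
	pthread_mutex_unlock(&dirty_lock);
	return oldest;
}

/* On ack the entry is detached; returning whether it was the oldest lets
 * the caller decide to wake waiters (the kernel's
 * __detach_cap_flush_from_mdsc() instead hands a wake flag to the previous
 * pending flush when a non-head entry completes first). */
static int finish_flush(struct cap_flush *cf)
{
	int was_oldest;

	pthread_mutex_lock(&dirty_lock);
	was_oldest = (TAILQ_FIRST(&flush_list) == cf);
	TAILQ_REMOVE(&flush_list, cf, g_list);
	pthread_mutex_unlock(&dirty_lock);
	return was_oldest;
}
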
1905 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb); in ceph_check_caps() local
1950 if (!mdsc->stopping && inode->i_nlink > 0) { in ceph_check_caps()
1997 if ((!(flags & CHECK_CAPS_NOINVAL) || mdsc->stopping) && in ceph_check_caps()
2114 __kick_flushing_caps(mdsc, session, ci, 0); in ceph_check_caps()
2131 spin_lock(&mdsc->cap_dirty_lock); in ceph_check_caps()
2132 oldest_flush_tid = __get_oldest_flush_tid(mdsc); in ceph_check_caps()
2133 spin_unlock(&mdsc->cap_dirty_lock); in ceph_check_caps()
2153 __cap_delay_requeue(mdsc, ci); in ceph_check_caps()
2170 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; in try_flush_caps() local
2190 __kick_flushing_caps(mdsc, session, ci, 0); in try_flush_caps()
2248 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; in flush_mdlog_and_wait_inode_unsafe_requests() local
2280 mutex_lock(&mdsc->mutex); in flush_mdlog_and_wait_inode_unsafe_requests()
2281 max_sessions = mdsc->max_sessions; in flush_mdlog_and_wait_inode_unsafe_requests()
2285 mutex_unlock(&mdsc->mutex); in flush_mdlog_and_wait_inode_unsafe_requests()
2325 mutex_unlock(&mdsc->mutex); in flush_mdlog_and_wait_inode_unsafe_requests()
2430 struct ceph_mds_client *mdsc = in ceph_write_inode() local
2431 ceph_sb_to_client(inode->i_sb)->mdsc; in ceph_write_inode()
2435 __cap_delay_requeue_front(mdsc, ci); in ceph_write_inode()
2441 static void __kick_flushing_caps(struct ceph_mds_client *mdsc, in __kick_flushing_caps() argument
2523 void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc, in ceph_early_kick_flushing_caps() argument
2532 spin_lock(&mdsc->cap_dirty_lock); in ceph_early_kick_flushing_caps()
2533 oldest_flush_tid = __get_oldest_flush_tid(mdsc); in ceph_early_kick_flushing_caps()
2534 spin_unlock(&mdsc->cap_dirty_lock); in ceph_early_kick_flushing_caps()
2561 __kick_flushing_caps(mdsc, session, ci, in ceph_early_kick_flushing_caps()
2571 void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc, in ceph_kick_flushing_caps() argument
2582 spin_lock(&mdsc->cap_dirty_lock); in ceph_kick_flushing_caps()
2583 oldest_flush_tid = __get_oldest_flush_tid(mdsc); in ceph_kick_flushing_caps()
2584 spin_unlock(&mdsc->cap_dirty_lock); in ceph_kick_flushing_caps()
2596 __kick_flushing_caps(mdsc, session, ci, in ceph_kick_flushing_caps()
2606 struct ceph_mds_client *mdsc = session->s_mdsc; in ceph_kick_flushing_inode_caps() local
2616 spin_lock(&mdsc->cap_dirty_lock); in ceph_kick_flushing_inode_caps()
2619 oldest_flush_tid = __get_oldest_flush_tid(mdsc); in ceph_kick_flushing_inode_caps()
2620 spin_unlock(&mdsc->cap_dirty_lock); in ceph_kick_flushing_inode_caps()
2622 __kick_flushing_caps(mdsc, session, ci, oldest_flush_tid); in ceph_kick_flushing_inode_caps()
2684 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; in try_get_cap_refs() local
2706 up_read(&mdsc->snap_rwsem); in try_get_cap_refs()
2753 if (!down_read_trylock(&mdsc->snap_rwsem)) { in try_get_cap_refs()
2764 down_read(&mdsc->snap_rwsem); in try_get_cap_refs()
2813 __ceph_touch_fmode(ci, mdsc, flags); in try_get_cap_refs()
2817 up_read(&mdsc->snap_rwsem); in try_get_cap_refs()
2820 ceph_update_cap_mis(&mdsc->metric); in try_get_cap_refs()
2822 ceph_update_cap_hit(&mdsc->metric); in try_get_cap_refs()
2925 struct ceph_mds_client *mdsc = fsc->mdsc; in ceph_get_caps() local
2934 spin_lock(&mdsc->caps_list_lock); in ceph_get_caps()
2935 list_add(&cw.list, &mdsc->cap_wait_list); in ceph_get_caps()
2936 spin_unlock(&mdsc->caps_list_lock); in ceph_get_caps()
2955 spin_lock(&mdsc->caps_list_lock); in ceph_get_caps()
2957 spin_unlock(&mdsc->caps_list_lock); in ceph_get_caps()
3624 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; in handle_cap_flush_ack() local
3672 spin_lock(&mdsc->cap_dirty_lock); in handle_cap_flush_ack()
3675 wake_mdsc |= __detach_cap_flush_from_mdsc(mdsc, cf); in handle_cap_flush_ack()
3688 mdsc->num_cap_flushing--; in handle_cap_flush_ack()
3705 spin_unlock(&mdsc->cap_dirty_lock); in handle_cap_flush_ack()
3721 wake_up_all(&mdsc->cap_flushing_wq); in handle_cap_flush_ack()
3730 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; in __ceph_remove_capsnap() local
3742 spin_lock(&mdsc->cap_dirty_lock); in __ceph_remove_capsnap()
3746 ret = __detach_cap_flush_from_mdsc(mdsc, &capsnap->cap_flush); in __ceph_remove_capsnap()
3749 spin_unlock(&mdsc->cap_dirty_lock); in __ceph_remove_capsnap()
3774 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; in handle_cap_flushsnap_ack() local
3809 wake_up_all(&mdsc->cap_flushing_wq); in handle_cap_flushsnap_ack()
3857 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; in handle_cap_export() local
3880 down_read(&mdsc->snap_rwsem); in handle_cap_export()
3933 spin_lock(&mdsc->cap_dirty_lock); in handle_cap_export()
3936 spin_unlock(&mdsc->cap_dirty_lock); in handle_cap_export()
3944 up_read(&mdsc->snap_rwsem); in handle_cap_export()
3948 tsession = ceph_mdsc_open_export_target_session(mdsc, target); in handle_cap_export()
3959 new_cap = ceph_get_cap(mdsc, NULL); in handle_cap_export()
3970 up_read(&mdsc->snap_rwsem); in handle_cap_export()
3977 ceph_put_cap(mdsc, new_cap); in handle_cap_export()
3985 static void handle_cap_import(struct ceph_mds_client *mdsc, in handle_cap_import() argument
4019 new_cap = ceph_get_cap(mdsc, NULL); in handle_cap_import()
4026 ceph_put_cap(mdsc, new_cap); in handle_cap_import()
4068 struct ceph_mds_client *mdsc = session->s_mdsc; in ceph_handle_caps() local
4132 struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc; in ceph_handle_caps()
4178 inode = ceph_find_inode(mdsc->fsc->sb, vino); in ceph_handle_caps()
4191 cap = ceph_get_cap(mdsc, NULL); in ceph_handle_caps()
4220 down_write(&mdsc->snap_rwsem); in ceph_handle_caps()
4221 ceph_update_snap_trace(mdsc, snaptrace, in ceph_handle_caps()
4224 downgrade_write(&mdsc->snap_rwsem); in ceph_handle_caps()
4226 down_read(&mdsc->snap_rwsem); in ceph_handle_caps()
4229 handle_cap_import(mdsc, inode, h, peer, session, in ceph_handle_caps()
4234 ceph_put_snap_realm(mdsc, realm); in ceph_handle_caps()
4291 ceph_flush_cap_releases(mdsc, session); in ceph_handle_caps()
4307 unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc) in ceph_check_delayed_caps() argument
4311 struct ceph_mount_options *opt = mdsc->fsc->mount_options; in ceph_check_delayed_caps()
4317 spin_lock(&mdsc->cap_delay_lock); in ceph_check_delayed_caps()
4318 while (!list_empty(&mdsc->cap_delay_list)) { in ceph_check_delayed_caps()
4319 ci = list_first_entry(&mdsc->cap_delay_list, in ceph_check_delayed_caps()
4334 spin_unlock(&mdsc->cap_delay_lock); in ceph_check_delayed_caps()
4338 spin_lock(&mdsc->cap_delay_lock); in ceph_check_delayed_caps()
4341 spin_unlock(&mdsc->cap_delay_lock); in ceph_check_delayed_caps()
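
ceph_check_delayed_caps() is the consumer of the delay queue sketched earlier: the delayed work walks cap_delay_list from the head and stops at the first inode whose hold time has not yet expired. A continuation of that sketch (the callback parameter is a stand-in for ceph_check_caps(), and the periodic re-arming is omitted):

/* ceph_check_delayed_caps() analogue, continuing the delay-queue sketch:
 * process expired entries from the head; the first unexpired entry ends
 * the walk, since requeueing keeps the list roughly sorted by timeout. */
static void check_delayed(void (*check)(struct delayed_inode *))
{
	struct delayed_inode *di;
	time_t now = time(NULL);

	pthread_mutex_lock(&delay_lock);
	while ((di = TAILQ_FIRST(&delay_list)) != NULL) {
		if (di->hold_until > now)
			break;                   /* the rest are newer */
		TAILQ_REMOVE(&delay_list, di, delay);
		pthread_mutex_unlock(&delay_lock);
		check(di);                       /* ceph_check_caps() in the kernel */
		pthread_mutex_lock(&delay_lock);
	}
	pthread_mutex_unlock(&delay_lock);
}
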
4351 struct ceph_mds_client *mdsc = s->s_mdsc; in flush_dirty_session_caps() local
4356 spin_lock(&mdsc->cap_dirty_lock); in flush_dirty_session_caps()
4363 spin_unlock(&mdsc->cap_dirty_lock); in flush_dirty_session_caps()
4367 spin_lock(&mdsc->cap_dirty_lock); in flush_dirty_session_caps()
4369 spin_unlock(&mdsc->cap_dirty_lock); in flush_dirty_session_caps()
4373 void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc) in ceph_flush_dirty_caps() argument
4375 ceph_mdsc_iterate_sessions(mdsc, flush_dirty_session_caps, true); in ceph_flush_dirty_caps()
4379 struct ceph_mds_client *mdsc, int fmode) in __ceph_touch_fmode() argument
4390 __cap_delay_requeue(mdsc, ci); in __ceph_touch_fmode()
4395 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->netfs.inode.i_sb); in ceph_get_fmode() local
4401 atomic64_inc(&mdsc->metric.opened_files); in ceph_get_fmode()
4418 percpu_counter_inc(&mdsc->metric.opened_inodes); in ceph_get_fmode()
4429 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->netfs.inode.i_sb); in ceph_put_fmode() local
4435 atomic64_dec(&mdsc->metric.opened_files); in ceph_put_fmode()
4454 percpu_counter_dec(&mdsc->metric.opened_inodes); in ceph_put_fmode()
4474 struct ceph_mds_client *mdsc = in ceph_drop_caps_for_unlink() local
4475 ceph_inode_to_client(inode)->mdsc; in ceph_drop_caps_for_unlink()
4476 __cap_delay_requeue_front(mdsc, ci); in ceph_drop_caps_for_unlink()
4609 static int remove_capsnaps(struct ceph_mds_client *mdsc, struct inode *inode) in remove_capsnaps() argument
4628 wake_up_all(&mdsc->cap_flushing_wq); in remove_capsnaps()
4635 struct ceph_mds_client *mdsc = fsc->mdsc; in ceph_purge_inode_cap() local
4658 spin_lock(&mdsc->cap_dirty_lock); in ceph_purge_inode_cap()
4686 mdsc->num_cap_flushing--; in ceph_purge_inode_cap()
4689 spin_unlock(&mdsc->cap_dirty_lock); in ceph_purge_inode_cap()
4718 iputs = remove_capsnaps(mdsc, inode); in ceph_purge_inode_cap()